Example #1
 def map(self, image_id, image_binary):
     try:
         boxes = self.image_box_fns[image_id]
     except KeyError:
         pass
     else:
         image = imfeat.image_fromstring(image_binary)
         size_array = np.array([
             image.shape[0], image.shape[1], image.shape[0], image.shape[1]
         ])
         scale_boxes = {}
         for box, fn in boxes:
             fbox = size_array * box
             scale = int(
                 np.round(np.log2(
                     (fbox[2] - fbox[0]) / feature.PATCH_SIZE)))
             scale_check = int(
                 np.round(np.log2(
                     (fbox[3] - fbox[1]) / feature.PATCH_SIZE)))
             if scale != scale_check:
                 raise ValueError('Box is not square.')
             scale_boxes.setdefault(scale, []).append((box, fn))
         # Order boxes and fn's by scale
         for scale in range(max(scale_boxes.keys()) + 1):
             if scale > 0:
                 height, width = np.array(image.shape[:2]) / 2
                 image = image[:height * 2, :width * 2, :]
                 if min(width, height) < 1:
                     raise ValueError('Image is too small')
                 image = cv2.resize(image, (width, height))
             if image is None:  # NOTE(brandyn): It is too small
                 raise ValueError('Image is too small')
             try:
                 boxes = scale_boxes[scale]
             except KeyError:
                 continue
             size_array = np.array([
                 image.shape[0], image.shape[1], image.shape[0],
                 image.shape[1]
             ])
             for box, fn in boxes:
                 box = np.round(size_array * box).astype(np.int)
                 print(box)
                 if self.type == 'image':
                     image_box = np.ascontiguousarray(
                         image[box[0]:box[2], box[1]:box[3], :])
                     yield fn, imfeat.image_tostring(image_box, 'png')
                 elif self.type == 'feature':
                     image_box = np.ascontiguousarray(
                         image[box[0]:box[2], box[1]:box[3], :])
                     yield fn, feature.compute_patch(image_box)
                 elif self.type == 'box':
                     image2 = image.copy()
                     cv2.rectangle(image2, (box[1], box[0]),
                                   (box[3], box[2]), (0, 255, 0), 4)
                     yield fn, imfeat.image_tostring(image2, 'jpg')
                 else:
                     raise ValueError(self.type)
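
The mapper above buckets each box by the pyramid level whose patch size best matches the box side. A minimal standalone sketch of that computation, assuming a hypothetical PATCH_SIZE of 32 and normalized [y1, x1, y2, x2] boxes (the real constant lives in the feature module):

import numpy as np

PATCH_SIZE = 32  # assumed value; the original reads feature.PATCH_SIZE

def box_scale(box, image_shape):
    # Convert the normalized box to pixels, then pick the pyramid level whose
    # halvings bring the box side down to roughly PATCH_SIZE.
    size_array = np.array([image_shape[0], image_shape[1],
                           image_shape[0], image_shape[1]])
    fbox = size_array * np.asarray(box, dtype=float)
    scale = int(np.round(np.log2((fbox[2] - fbox[0]) / PATCH_SIZE)))
    scale_check = int(np.round(np.log2((fbox[3] - fbox[1]) / PATCH_SIZE)))
    if scale != scale_check:
        raise ValueError('Box is not square.')
    return scale

print(box_scale([.25, .25, .75, .75], (128, 128)))  # 64 px side -> scale 1
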
Example #2
def _action_handle(function, params, image):
    print('Action[%s]' % function)
    try:
        ff = FEATURE_FUN[function]
        image = imfeat.resize_image_max_side(image, 320)  # TODO: Expose this
        return {'feature': ff(**params)(image).tolist()}
    except KeyError:
        pass
    try:
        sf = SEARCH_FUN[function]
        image = imfeat.resize_image_max_side(image, 320)  # TODO: Expose this
        print(sf.feature)
        out = {'results': sf.analyze_cropped(image)}
        if function == 'see/search/masks':
            out['classes'] = CLASS_COLORS
        return out
    except KeyError:
        pass
    try:
        cf = CLASSIFY_FUN[function]
        image = imfeat.resize_image_max_side(image, 320)  # TODO: Expose this
        return {'results': cf(image)}
    except KeyError:
        pass
    if function == 'see/texton' or function == 'see/texton_ilp':
        image = cv2.resize(image, (320, int(image.shape[0] * 320. / image.shape[1])))
        image = np.ascontiguousarray(image)
        semantic_masks = TEXTON(image)
        if function == 'see/texton_ilp':
            ilp_pred = CLASSIFY_FUN['see/classify/indoor'](imfeat.resize_image_max_side(image, 320))
            try:
                bin_index = [x for x, y in enumerate(ILP_WEIGHTS['bins']) if y >= ilp_pred][0]
            except IndexError:
                bin_index = ILP_WEIGHTS['ilp_tables'].shape[1]
            if bin_index != 0:
                bin_index -= 1
            ilp_weights = ILP_WEIGHTS['ilp_tables'][:, bin_index]
            print('ILP Pred[%s] Weights[%s]' % (ilp_pred, ilp_weights))
            semantic_masks *= ilp_weights
        #min_probability = float(params.get('min_probability', 0.5))
        #semantic_masks = np.dstack([semantic_masks, np.ones_like(semantic_masks[:, :, 0]) * min_probability])
        texton_argmax2 = np.argmax(semantic_masks, 2)
        image_string = imfeat.image_tostring(COLORS_BGR[texton_argmax2], 'png')
        out = {'argmax_pngb64': base64.b64encode(image_string)}
        out['classes'] = CLASS_COLORS
        return out
    if function == 'see/colors':
        image = cv2.resize(image, (320, int(image.shape[0] * 320. / image.shape[1])))
        image = np.ascontiguousarray(image)
        masks = COLOR_NAMING.make_feature_mask(image)
        mask_argmax = np.argmax(masks, 2)
        image_string = imfeat.image_tostring(COLOR_NAMING.color_values[mask_argmax], 'png')
        return {'argmax_pngb64': base64.b64encode(image_string)}
    if function == 'see/faces':
        results = [map(float, x) for x in FACES._detect_faces(image)]
        return {'faces': [{'tl_x': x[0], 'tl_y': x[1], 'width': x[2], 'height': x[3]}
                          for x in results]}
    return {}
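
The handler above resolves the action name by probing one lookup table after another and falling through on KeyError. A minimal sketch of that dispatch pattern, using stand-in tables rather than the real FEATURE_FUN/SEARCH_FUN/CLASSIFY_FUN:

# Stand-in handler tables; the real ones map action names to feature, search
# and classifier callables.
FEATURE_TABLE = {'see/feature/histogram': lambda image: {'feature': [0.0] * 8}}
CLASSIFY_TABLE = {'see/classify/indoor': lambda image: {'results': [['indoor', 0.7]]}}

def dispatch(function, image):
    for table in (FEATURE_TABLE, CLASSIFY_TABLE):
        try:
            handler = table[function]
        except KeyError:
            continue
        return handler(image)
    return {}  # unknown action, same fallback as above

print(dispatch('see/classify/indoor', None))
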
Example #3
 def test_tostring(self):
     for fn in ['lena.jpg', 'lena.pgm', 'lena.ppm']:
         for i in load_images(fn):
             for ext in ['jpeg', 'png']:
                 o = imfeat.image_tostring(i, ext)
                 o2 = imfeat.image_tostring(imfeat.image_fromstring(imfeat.image_tostring(i, ext)), ext)
                 if ext == 'png':
                     np.testing.assert_equal(o, o2)
                 s = imfeat.image_fromstring(o)
                 s2 = imfeat.image_fromstring(o2)
                 # No more than 9% of the pixels differ by more than 3
                 self.assertLess(np.mean(np.abs(s.astype(int) - s2.astype(int)) > 3), .09)
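
A self-contained version of the same round-trip check, using OpenCV's imencode/imdecode instead of imfeat so it runs without that library; it prints the fraction of pixels that drift by more than 3 levels, the quantity the assertion above bounds at 9%:

import cv2
import numpy as np

image = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
# PNG is lossless, so a PNG round trip reproduces the pixels exactly.
png = cv2.imencode('.png', image)[1]
assert np.array_equal(cv2.imdecode(png, cv2.IMREAD_COLOR), image)
# JPEG is lossy; measure how many pixels drift by more than 3 levels.
jpg = cv2.imencode('.jpg', image)[1]
decoded = cv2.imdecode(jpg, cv2.IMREAD_COLOR)
diff = np.abs(decoded.astype(int) - image.astype(int))
print('fraction of pixels off by more than 3:', (diff > 3).mean())
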
Example #4
 def test_tostring(self):
     for fn in ['lena.jpg', 'lena.pgm', 'lena.ppm']:
         for i in load_images(fn):
             for ext in ['jpeg', 'png']:
                 o = imfeat.image_tostring(i, ext)
                 o2 = imfeat.image_tostring(imfeat.image_fromstring(imfeat.image_tostring(i, ext)), ext)
                 if ext == 'png':
                     np.testing.assert_equal(o, o2)
                 s = imfeat.image_fromstring(o)
                 s2 = imfeat.image_fromstring(o2)
                 # No more than 9% of the pixels differ by more than 3
                 self.assertLess(np.mean(np.abs(s.astype(int) - s2.astype(int)) > 3), .09)
Example #5
def main():
    exemplars = sorted(pickle.load(open('exemplars.pkl', 'rb')), key=lambda x: x[0][2], reverse=True)[:100]
    with open('exemplars_best.pkl', 'wb') as fp:
        pickle.dump(exemplars, fp, -1)
    hdfs_output = 'exemplarbank/output/%s/' % '1341790878.92'
    #hadoopy.launch_frozen('/user/brandyn/aladdin_results/keyframe/9/keyframe', hdfs_output + 'frame_pred', 'predict_video_frame.py', cmdenvs=['EXEMPLARS=exemplars_best.pkl', 'CELL_SKIP=1'], remove_output=True, files=['exemplars_best.pkl'])
    local_out = 'frame_preds/'
    try:
        shutil.rmtree(local_out)
    except OSError:
        pass
    os.makedirs(local_out)
    for num, (data, (pyramid, num_boxes)) in enumerate(hadoopy.readtb(hdfs_output + 'frame_pred')):
        if np.sum(pyramid):
            pyramid_norm = pyramid / float(num_boxes)
            pyramid_prob = np.sqrt(pyramid / float(np.max(pyramid)))
            p = np.sum(pyramid_norm)
            f = imfeat.image_fromstring(data['frame'])
            pyramid_prob_frame = cv2.resize(pyramid_prob, (f.shape[1], f.shape[0]))
            pyramid_prob_frame_color = COLORS[(pyramid_prob_frame * 255).astype(np.int), :]
            alpha = .5
            beta = alpha * pyramid_prob_frame
            beta = beta.reshape((beta.shape[0], beta.shape[1], 1))
        else:  # Empty pyramid: fall back to the raw frame (avoids NameError below)
            f = imfeat.image_fromstring(data['frame'])
            p = 0.
            beta = 0.
            pyramid_prob_frame_color = 0.
        f = ((1 - beta) * f + beta * pyramid_prob_frame_color).astype(np.uint8)
        print(p)
        open(local_out + '%f-%d.jpg' % (p, num), 'wb').write(imfeat.image_tostring(f, 'jpg'))
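
The loop above alpha-blends a per-pixel probability map over each frame. A minimal sketch of that overlay, using cv2.applyColorMap as a stand-in for the COLORS lookup table; the frame and probability map here are synthetic:

import cv2
import numpy as np

frame = np.zeros((240, 320, 3), dtype=np.uint8)        # stand-in frame
prob = np.random.rand(8, 8).astype(np.float32)         # stand-in pyramid probabilities
prob_frame = cv2.resize(prob, (frame.shape[1], frame.shape[0]))
heat = cv2.applyColorMap((prob_frame * 255).astype(np.uint8), cv2.COLORMAP_JET)
beta = (.5 * prob_frame)[:, :, None]                   # per-pixel blend weight
overlay = ((1 - beta) * frame + beta * heat).astype(np.uint8)
cv2.imwrite('overlay.jpg', overlay)
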
Example #6
    def map(self, key, value):
        """
        Args:
            key: Image name
            value: Image as jpeg byte data

        Yields:
            A tuple in the form of (key, value)
            key: (Image name, (x, y, w, h))
            value: face image (.png)
        """
        try:
            image = imfeat.image_fromstring(value, {
                'type': 'numpy',
                'dtype': 'uint8',
                'mode': 'gray'
            })
            image_color = imfeat.image_fromstring(value, {
                'type': 'numpy',
                'dtype': 'uint8',
                'mode': 'bgr'
            })
        except:
            hadoopy.counter('DATA_ERRORS', 'ImageLoadError')
            return
        faces = _detect_faces(image, self._cascade)
        for x, y, w, h in faces:
            yield (key, (x, y, w, h)), imfeat.image_tostring(
                image_color[y:y + h, x:x + w, :], '.png')
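
A standalone sketch of the per-image work this mapper does, using OpenCV's CascadeClassifier directly; the cascade file and input path are assumptions, and _detect_faces is presumed to wrap something similar:

import cv2

cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')  # assumed path
image_color = cv2.imread('input.jpg')  # assumed input
gray = cv2.cvtColor(image_color, cv2.COLOR_BGR2GRAY)
for x, y, w, h in cascade.detectMultiScale(gray, 1.3, 5):
    # Crop the color image with the (x, y, w, h) box found on the gray image.
    face = image_color[y:y + h, x:x + w, :]
    cv2.imwrite('face_%d_%d.png' % (x, y), face)
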
Example #7
def image():
    print_request()
    # TODO(Cleanup)
    image, params = _get_image()
    image = imfeat.resize_image_max_side(image, 320)
    image_string = imfeat.image_tostring(image, 'jpg')
    return {'jpgb64': base64.b64encode(image_string)}
Example #8
def image():
    print_request()
    # TODO(Cleanup)
    image, params = _get_image()
    image = imfeat.resize_image_max_side(image, 320)
    image_string = imfeat.image_tostring(image, 'jpg')
    return {'jpgb64': base64.b64encode(image_string)}
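
A rough equivalent of the two handlers above using OpenCV in place of imfeat: cap the longer side at 320 px, re-encode as JPEG, and base64 the bytes for a JSON response. The input path is an assumption:

import base64
import cv2

image = cv2.imread('input.jpg')  # assumed input
scale = 320.0 / max(image.shape[:2])
if scale < 1:
    image = cv2.resize(image, (int(image.shape[1] * scale), int(image.shape[0] * scale)))
jpg = cv2.imencode('.jpg', image)[1]
response = {'jpgb64': base64.b64encode(jpg.tobytes())}
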
Example #9
    def map(self, event_filename, video_data):
        """

        Args:
            event_filename: Tuple of (event, filename)
            video_data: Binary video data

        Yields:
            A tuple in the form of ((event, filename), value) where value is a dict
            with contents

            prev_frame_time:
            prev_frame_num:
            prev_frame:
            frame_time:
            frame_num:
            frame:
        """
        ext = '.' + event_filename[1].rsplit('.')[1]
        with tempfile.NamedTemporaryFile(suffix=ext) as fp:
            fp.write(video_data)
            fp.flush()
            prev_frame_time = None
            prev_frame_num = None
            prev_frame = None
            try:
                for (frame_num, frame_time, frame), iskeyframe in self.kf(
                        viderator.frame_iter(fp.name,
                                             frame_skip=self.frame_skip,
                                             frozen=True)):
                    if self.max_time < frame_time:
                        break
                    if iskeyframe and prev_frame is not None:
                        yield event_filename, {
                            'prev_frame_time': prev_frame_time,
                            'prev_frame_num': prev_frame_num,
                            'prev_frame':
                            imfeat.image_tostring(prev_frame, 'JPEG'),
                            'frame_time': frame_time,
                            'frame_num': frame_num,
                            'frame': imfeat.image_tostring(frame, 'JPEG')
                        }
                    prev_frame_num = frame_num
                    prev_frame = frame
            except Exception as e:
                print(e)
                hadoopy.counter('VIDEO_ERROR', 'FFMPEGCantParse')
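
The mapper above buffers the previous frame so each keyframe can be emitted together with its predecessor. A sketch of that pairing with cv2.VideoCapture in place of viderator; the file name and the every-30th-frame "keyframe" rule are assumptions:

import cv2

cap = cv2.VideoCapture('clip.avi')  # assumed input
prev = None
frame_num = 0
pairs = []
while True:
    ok, frame = cap.read()
    if not ok:
        break
    if frame_num % 30 == 0 and prev is not None:  # stand-in keyframe test
        pairs.append((cv2.imencode('.jpg', prev)[1], cv2.imencode('.jpg', frame)[1]))
    prev = frame
    frame_num += 1
cap.release()
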
Example #10
 def map(self, image_id, image_binary):
     try:
         boxes = self.image_box_fns[image_id]
     except KeyError:
         pass
     else:
         image = imfeat.image_fromstring(image_binary)
         size_array = np.array([image.shape[0], image.shape[1], image.shape[0], image.shape[1]])
         scale_boxes = {}
         for box, fn in boxes:
             fbox = size_array * box
             scale = int(np.round(np.log2((fbox[2] - fbox[0]) / feature.PATCH_SIZE)))
             scale_check = int(np.round(np.log2((fbox[3] - fbox[1]) / feature.PATCH_SIZE)))
             if scale != scale_check:
                 raise ValueError("Box is not square.")
             scale_boxes.setdefault(scale, []).append((box, fn))
         # Order boxes and fn's by scale
         for scale in range(max(scale_boxes.keys()) + 1):
             if scale > 0:
                 height, width = np.array(image.shape[:2]) / 2
                 image = image[: height * 2, : width * 2, :]
                 if min(width, height) < 1:
                     raise ValueError("Image is too small")
                 image = cv2.resize(image, (width, height))
             if image is None:  # NOTE(brandyn): It is too small
                 raise ValueError("Image is too small")
             try:
                 boxes = scale_boxes[scale]
             except KeyError:
                 continue
             size_array = np.array([image.shape[0], image.shape[1], image.shape[0], image.shape[1]])
             for box, fn in boxes:
                 box = np.round(size_array * box).astype(np.int)
                 print(box)
                 if self.type == "image":
                     image_box = np.ascontiguousarray(image[box[0] : box[2], box[1] : box[3], :])
                     yield fn, imfeat.image_tostring(image_box, "png")
                 elif self.type == "feature":
                     image_box = np.ascontiguousarray(image[box[0] : box[2], box[1] : box[3], :])
                     yield fn, feature.compute_patch(image_box)
                 elif self.type == "box":
                     image2 = image.copy()
                     cv2.rectangle(image2, (box[1], box[0]), (box[3], box[2]), (0, 255, 0), 4)
                     yield fn, imfeat.image_tostring(image2, "jpg")
                 else:
                     raise ValueError(self.type)
Example #11
    def reduce(self, key, values):
        """

        For this to work we need a modified partitioner on the substring before the first tab.
        This method operates on a single tile at level 0, and more tiles at higher zoom levels (4x each level).
        
        Args:
            key: (tile_id, subtile_id)
            values: Iterator of [dist, images] where images are power of 2 JPEG
                images in descending order by size

        Yields:
            Tuple of (key, value) where
            key: Tile name
            value: JPEG Image Data
        """
        # Select minimum distance image, throw away score, and order images from smallest to largest powers of 2
        self._sub_tiles[key] = min(values, key=lambda x: x[0])[1][::-1]
        # As the images were JPEG, we need to make them arrays again
        self._sub_tiles[key] = [
            imfeat.image_fromstring(x) for x in self._sub_tiles[key]
        ]
        # If we don't have all of the necessary subtiles.
        if len(self._sub_tiles) != _subtiles_per_tile:
            return
        self._verify_subtile_keys()
        for level in range(_levels):
            # Each image is smaller than the tile
            scale = 2**level
            num_tiles = scale * scale
            subtiles_per_tile_len = _subtiles_per_tile_length / scale
            subtiles_per_tile = subtiles_per_tile_len**2
            subtile_len = _subtile_length * scale
            cur_subtiles = [(self._find_output(key, scale,
                                               subtiles_per_tile_len,
                                               subtile_len), images[level])
                            for key, images in self._sub_tiles.items()]
            cur_subtiles.sort(key=lambda x: x[0])
            cur_tile = np.zeros((_tile_length, _tile_length, 3),
                                dtype=np.uint8)
            assert len(cur_subtiles) / subtiles_per_tile == num_tiles
            cur_outtile = None
            for subtile_ind, ((xouttile, youttile, xoffset, yoffset),
                              image) in enumerate(cur_subtiles):
                if cur_outtile is None:
                    cur_outtile = (xouttile, youttile)
                assert cur_outtile == (xouttile, youttile)
                #cur_tile.paste(image, (xoffset, yoffset))  # TODO Suspect
                cur_tile[yoffset:yoffset + image.shape[0],
                         xoffset:xoffset + image.shape[1], :] = image
                if not (subtile_ind + 1) % subtiles_per_tile:
                    tile_name = '%d_%d_%d.jpg' % (level, xouttile, youttile)
                    yield tile_name, imfeat.image_tostring(cur_tile, 'jpg')
                    cur_tile = np.zeros((_tile_length, _tile_length, 3),
                                        dtype=np.uint8)
                    cur_outtile = None
        self._sub_tiles = {}
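
The reducer above pastes fixed-size subtiles into a larger tile at computed (xoffset, yoffset) positions. A minimal sketch of that placement; the 256 px tile and 64 px subtile sizes are assumptions:

import numpy as np

tile = np.zeros((256, 256, 3), dtype=np.uint8)
subtile = np.full((64, 64, 3), 255, dtype=np.uint8)  # a white stand-in subtile
xoffset, yoffset = 64, 128
tile[yoffset:yoffset + subtile.shape[0], xoffset:xoffset + subtile.shape[1], :] = subtile
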
Example #12
    def map(self, event_filename, video_data):
        """

        Args:
            event_filename: Tuple of (event, filename)
            video_data: Binary video data

        Yields:
            A tuple in the form of ((event, filename), value) where value is a dict
            with contents

            prev_frame_time:
            prev_frame_num:
            prev_frame:
            frame_time:
            frame_num:
            frame:
        """
        ext = '.' + event_filename[1].rsplit('.')[1]
        with tempfile.NamedTemporaryFile(suffix=ext) as fp:
            fp.write(video_data)
            fp.flush()
            prev_frame_time = None
            prev_frame_num = None
            prev_frame = None
            try:
                for (frame_num, frame_time, frame), iskeyframe in self.kf(viderator.frame_iter(fp.name,
                                                                                               frame_skip=self.frame_skip,
                                                                                               frozen=True)):
                    if self.max_time < frame_time:
                        break
                    if iskeyframe and prev_frame is not None:
                        yield event_filename, {'prev_frame_time': prev_frame_time,
                                               'prev_frame_num': prev_frame_num,
                                               'prev_frame': imfeat.image_tostring(prev_frame, 'JPEG'),
                                               'frame_time': frame_time,
                                               'frame_num': frame_num,
                                               'frame': imfeat.image_tostring(frame, 'JPEG')}
                    prev_frame_num = frame_num
                    prev_frame = frame
            except Exception as e:
                print(e)
                hadoopy.counter('VIDEO_ERROR', 'FFMPEGCantParse')
Example #13
def load_data_iter(local_inputs):
    # Push labeled samples to HDFS
    unique_ids = set()
    for fn in local_inputs:
        try:
            image_data = imfeat.image_tostring(cleanup_image(cv2.imread(fn)), '.jpg')
        except (ValueError, IndexError):
            continue
        image_id = hashlib.md5(image_data).hexdigest()
        if image_id not in unique_ids:
            unique_ids.add(image_id)
            yield image_id, image_data
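
The loader above de-duplicates images by the MD5 of their encoded bytes. A tiny standalone sketch of that idea:

import hashlib

def dedup(blobs):
    # Yield each distinct byte string once, keyed by its MD5 hex digest.
    seen = set()
    for data in blobs:
        digest = hashlib.md5(data).hexdigest()
        if digest not in seen:
            seen.add(digest)
            yield digest, data

print(list(dedup([b'aa', b'bb', b'aa'])))  # the second b'aa' is dropped
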
Example #14
def get_content(content_id):
    if content_id.endswith('.b16.html'):
        return '<img src="/content/%s.jpg" />' % base64.b16decode(content_id[:-9])
    with open(base64.b16decode(content_id[:content_id.find('.b16')]), 'rb') as fp:
        image_data = fp.read()
    if content_id.endswith('.b16.thumb.jpg'):
        try:
            return CACHE[content_id]
        except KeyError:
            out_data = imfeat.image_tostring(imfeat.resize_image(imfeat.image_fromstring(image_data), 200, 200), 'JPEG')
            CACHE[content_id] = out_data
            return out_data
    return image_data
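
The content ids above are hex-encoded file paths with a suffix that selects the representation (.b16.html, .b16.thumb.jpg, or plain .b16). A small sketch of that encoding round trip; the path is an assumption:

import base64

path = b'/data/images/cat.jpg'  # assumed path
content_id = base64.b16encode(path).decode() + '.b16.thumb.jpg'
decoded = base64.b16decode(content_id[:content_id.find('.b16')])
assert decoded == path
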
Example #15
 def close(self):
     """
     Yields:
         Tuple of (key, value) where
         key: tile_id\tsubtile_id (easily parsable by the
             KeyFieldBasedPartitioner)
         value: [dist, images] where images are power of 2 JPEG images
             in descending order by size
     """
     assert sorted(self.target_tiles.keys()) == sorted(self.min_dists.keys())
     for key, (dist, images) in self.min_dists.items():  # sorted(
         images_jpg = [imfeat.image_tostring(x, 'jpg') for x in images]  # JPEG's are much smaller
         yield key, [dist, images_jpg]
Example #16
def load_data_iter(local_inputs):
    # Push labeled samples to HDFS
    unique_ids = set()
    for fn in local_inputs:
        try:
            image_data = imfeat.image_tostring(cleanup_image(cv2.imread(fn)),
                                               '.jpg')
        except (ValueError, IndexError):
            continue
        image_id = hashlib.md5(image_data).hexdigest()
        if image_id not in unique_ids:
            unique_ids.add(image_id)
            yield image_id, image_data
Example #17
    def reduce(self, key, values):
        """

        For this to work we need a modified partitioner on the substring before the first tab.
        This method operates on a single tile at level 0, and more tiles at higher zoom levels (4x each level).
        
        Args:
            key: (tile_id, subtile_id)
            values: Iterator of [dist, images] where images are power of 2 JPEG
                images in descending order by size

        Yields:
            Tuple of (key, value) where
            key: Tile name
            value: JPEG Image Data
        """
        # Select minimum distance image, throw away score, and order images from smallest to largest powers of 2
        self._sub_tiles[key] = min(values, key=lambda x: x[0])[1][::-1]
        # As the images were JPEG, we need to make them arrays again
        self._sub_tiles[key] = [imfeat.image_fromstring(x) for x in self._sub_tiles[key]]
        # If we don't have all of the necessary subtiles.
        if len(self._sub_tiles) != _subtiles_per_tile:
            return
        self._verify_subtile_keys()
        for level in range(_levels):
            # Each image is smaller than the tile
            scale = 2 ** level
            num_tiles = scale * scale
            subtiles_per_tile_len = _subtiles_per_tile_length / scale
            subtiles_per_tile = subtiles_per_tile_len ** 2
            subtile_len = _subtile_length * scale
            cur_subtiles = [(self._find_output(key, scale, subtiles_per_tile_len, subtile_len), images[level])
                         for key, images in self._sub_tiles.items()]
            cur_subtiles.sort(key=lambda x: x[0])
            cur_tile = np.zeros((_tile_length, _tile_length, 3), dtype=np.uint8)
            assert len(cur_subtiles) / subtiles_per_tile == num_tiles
            cur_outtile = None
            for subtile_ind, ((xouttile, youttile, xoffset, yoffset), image) in enumerate(cur_subtiles):
                if cur_outtile is None:
                    cur_outtile = (xouttile, youttile)
                assert cur_outtile == (xouttile, youttile)
                #cur_tile.paste(image, (xoffset, yoffset))  # TODO Suspect
                cur_tile[yoffset:yoffset + image.shape[0], xoffset:xoffset + image.shape[1], :] = image
                if not (subtile_ind + 1) % subtiles_per_tile:
                    tile_name = '%d_%d_%d.jpg' % (level, xouttile, youttile)
                    yield tile_name, imfeat.image_tostring(cur_tile, 'jpg')
                    cur_tile = np.zeros((_tile_length, _tile_length, 3), dtype=np.uint8)
                    cur_outtile = None
        self._sub_tiles = {}
Example #18
 def close(self):
     """
     Yields:
         Tuple of (key, value) where
         key: tile_id\tsubtile_id (easily parsable by the
             KeyFieldBasedPartitioner)
         value: [dist, images] where images are power of 2 JPEG images
             in descending order by size
     """
     assert sorted(self.target_tiles.keys()) == sorted(
         self.min_dists.keys())
     for key, (dist, images) in self.min_dists.items():  # sorted(
         images_jpg = [imfeat.image_tostring(x, 'jpg')
                       for x in images]  # JPEG's are much smaller
         yield key, [dist, images_jpg]
Example #19
 def map(self, name, image_data):
     try:
         image = imfeat.image_fromstring(image_data)
     except:
         hadoopy.counter('DATA_ERRORS', 'ImageLoadError')
         return
     if self.filter_side is not None and min(image.shape[0], image.shape[1]) < self.filter_side:
         hadoopy.counter('DATA_ERRORS', 'ImageTooSmallPre')
         return
     if self.max_side is not None:
         image = imfeat.resize_image_max_side(image, self.max_side)
     if self.filter_side is not None and min(image.shape[0], image.shape[1]) < self.filter_side:
         hadoopy.counter('DATA_ERRORS', 'ImageTooSmallPost')
         return
     yield name, imfeat.image_tostring(image, 'jpg')
Example #20
 def map(self, name, image_data):
     try:
         image = imfeat.image_fromstring(image_data)
     except:
         hadoopy.counter('DATA_ERRORS', 'ImageLoadError')
         return
     if self.filter_side is not None and min(
             image.shape[0], image.shape[1]) < self.filter_side:
         hadoopy.counter('DATA_ERRORS', 'ImageTooSmallPre')
         return
     if self.max_side is not None:
         image = imfeat.resize_image_max_side(image, self.max_side)
     if self.filter_side is not None and min(
             image.shape[0], image.shape[1]) < self.filter_side:
         hadoopy.counter('DATA_ERRORS', 'ImageTooSmallPost')
         return
     yield name, imfeat.image_tostring(image, 'jpg')
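
Both mappers above drop images whose short side is under filter_side, both before and after capping the long side at max_side. An OpenCV-based sketch of that filter; the 64/320 px defaults are assumptions:

import cv2

def filter_and_resize(image, filter_side=64, max_side=320):
    # Reject images that are too small even before resizing.
    if min(image.shape[:2]) < filter_side:
        return None
    scale = float(max_side) / max(image.shape[:2])
    if scale < 1:
        image = cv2.resize(image, (int(image.shape[1] * scale),
                                   int(image.shape[0] * scale)))
    # Shrinking the long side may push the short side under the threshold too.
    if min(image.shape[:2]) < filter_side:
        return None
    return image
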
Example #21
    def map(self, key, value):
        """
        Args:
            key: Image name
            value: Image as jpeg byte data

        Yields:
            A tuple in the form of (key, value)
            key: (Image name, (x, y, w, h))
            value: face image (.png)
        """
        try:
            image = imfeat.image_fromstring(value, {'type': 'numpy', 'dtype': 'uint8', 'mode': 'gray'})
            image_color = imfeat.image_fromstring(value, {'type': 'numpy', 'dtype': 'uint8', 'mode': 'bgr'})
        except:
            hadoopy.counter('DATA_ERRORS', 'ImageLoadError')
            return
        faces = _detect_faces(image, self._cascade)
        for x, y, w, h in faces:
            yield (key, (x, y, w, h)), imfeat.image_tostring(image_color[y:y + h, x:x + w, :], '.png')
Example #22
    def map(self, event_filename, video_data):
        """
        Args:
            event_filename: Tuple of (event, filename)
            video_data: Binary video data

        Yields:
            A tuple in the form of ((event, filename, frame_num, frame_time), frame_data)
        """
        ext = '.' + event_filename[1].rsplit('.')[1]
        event, filename = event_filename
        out_count = 0
        with tempfile.NamedTemporaryFile(suffix=ext) as fp:
            fp.write(video_data)
            fp.flush()
            try:
                for frame_num, frame_time, frame in viderator.frame_iter(
                        fp.name, frozen=True, frame_skip=self.frame_skip):
                    if frame_num >= self.max_frames_per_video:
                        break
                    frame_orig = frame
                    if self.remove_bars:
                        sz = self.remove_bars.find_bars(frame)
                        frame = frame[sz[0]:sz[1], sz[2]:sz[3], :]
                        if not frame.size:  # Empty
                            continue
                    if self._feat.predict(frame):
                        if self.output_frame:
                            yield (event, filename, frame_num,
                                   frame_time), imfeat.image_tostring(
                                       frame_orig, 'JPEG')
                        else:
                            yield (event, filename, frame_num, frame_time), ''
                        out_count += 1
                        if out_count >= self.max_outputs_per_video:
                            break
            except IOError:
                hadoopy.counter('PICARUS', 'CantProcessVideo')
Example #23
    def map(self, event_filename, video_data):
        """
        Args:
            event_filename: Tuple of (event, filename)
            video_data: Binary video data

        Yields:
            A tuple in the form of ((event, filename, frame_num, frame_time), frame_data)
        """
        ext = '.' + event_filename[1].rsplit('.')[1]
        event, filename = event_filename
        out_count = 0
        with tempfile.NamedTemporaryFile(suffix=ext) as fp:
            fp.write(video_data)
            fp.flush()
            try:
                for frame_num, frame_time, frame in viderator.frame_iter(fp.name,
                                                                         frozen=True,
                                                                         frame_skip=self.frame_skip):
                    if frame_num >= self.max_frames_per_video:
                        break
                    frame_orig = frame
                    if self.remove_bars:
                        sz = self.remove_bars.find_bars(frame)
                        frame = frame[sz[0]:sz[1], sz[2]:sz[3], :]
                        if not frame.size:  # Empty
                            continue
                    if self._feat.predict(frame):
                        if self.output_frame:
                            yield (event, filename, frame_num, frame_time), imfeat.image_tostring(frame_orig, 'JPEG')
                        else:
                            yield (event, filename, frame_num, frame_time), ''
                        out_count += 1
                        if out_count >= self.max_outputs_per_video:
                            break
            except IOError:
                hadoopy.counter('PICARUS', 'CantProcessVideo')
Example #24
    def map(self, event_filename, video_data):
        """
        Args:
            event_filename: Tuple of (event, filename)
            video_data: Binary video data

        Yields:
            A tuple in the form of ((event, filename, frame_num, frame_time), frame_data)
        """
        ext = '.' + event_filename[1].rsplit('.')[1]
        event, filename = event_filename
        heap = [(float('-inf'), None)] * self.max_outputs
        with tempfile.NamedTemporaryFile(suffix=ext) as fp:
            fp.write(video_data)
            fp.flush()
            sys.stderr.write('Prevideo\n')
            try:
                for frame_num, frame_time, frame in viderator.frame_iter(fp.name,
                                                                         frozen=True,
                                                                         frame_skip=self.frame_skip):
                    sys.stderr.write('FrameNum[%d]\n' % frame_num)
                    if frame_num >= self.max_frames_per_video:
                        break
                    frame_orig = frame
                    if self.remove_bars:
                        sz = self.remove_bars.find_bars(frame)
                        frame = frame[sz[0]:sz[1], sz[2]:sz[3], :]
                        if not frame.size:  # Empty
                            continue
                    st = time.time()
                    c = self._feat(frame)[0]
                    sys.stderr.write('FrameTime[%f]\n' % (time.time() - st))
                    print('FrameTime[%f]' % (time.time() - st))
                    if c > heap[0][0]:
                        if self.output_frame:
                            heapq.heappushpop(heap,
                                              (c, ((event, filename, frame_num, frame_time), imfeat.image_tostring(frame_orig, 'JPEG'))))
                        else:
                            heapq.heappushpop(heap,
                                              (c, ((event, filename, frame_num, frame_time), '')))
            except IOError:
                hadoopy.counter('PICARUS', 'CantProcessVideo')
        for x in heap[-self.max_outputs_per_video:]:
            heapq.heappushpop(self.heap, x)
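
The mapper above keeps only the highest-scoring frames by seeding a heap with -inf sentinels and using heappushpop. A standalone sketch of that bounded top-K selection (empty-string payloads keep the sentinel tuples comparable):

import heapq

K = 3
heap = [(float('-inf'), '')] * K  # K sentinels that any real score beats
for score, item in [(0.2, 'a'), (0.9, 'b'), (0.5, 'c'), (0.7, 'd')]:
    if score > heap[0][0]:
        # Evict the current minimum so only the K best survive.
        heapq.heappushpop(heap, (score, item))
print(sorted(heap, reverse=True))  # [(0.9, 'b'), (0.7, 'd'), (0.5, 'c')]
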
Example #25
 def _map(self, row, image_binary):
     try:
         image = imfeat.image_fromstring(image_binary)
         yield row, imfeat.image_tostring(imfeat.resize_image_max_side(image, self.max_side), 'jpg')
     except:
         hadoopy.counter('DATA_ERRORS', 'ImageLoadError')
Example #26
def _action_handle(function, params, image):
    print('Action[%s]' % function)
    try:
        ff = FEATURE_FUN[function]
        image = imfeat.resize_image_max_side(image, 320)  # TODO: Expose this
        return {'feature': ff(**params)(image).tolist()}
    except KeyError:
        pass
    try:
        sf = SEARCH_FUN[function]
        image = imfeat.resize_image_max_side(image, 320)  # TODO: Expose this
        print(sf.feature)
        out = {'results': sf.analyze_cropped(image)}
        if function == 'see/search/masks':
            out['classes'] = CLASS_COLORS
        return out
    except KeyError:
        pass
    try:
        cf = CLASSIFY_FUN[function]
        image = imfeat.resize_image_max_side(image, 320)  # TODO: Expose this
        return {'results': cf(image)}
    except KeyError:
        pass
    if function == 'see/texton' or function == 'see/texton_ilp':
        image = cv2.resize(image,
                           (320, int(image.shape[0] * 320. / image.shape[1])))
        image = np.ascontiguousarray(image)
        semantic_masks = TEXTON(image)
        if function == 'see/texton_ilp':
            ilp_pred = CLASSIFY_FUN['see/classify/indoor'](
                imfeat.resize_image_max_side(image, 320))
            try:
                bin_index = [
                    x for x, y in enumerate(ILP_WEIGHTS['bins'])
                    if y >= ilp_pred
                ][0]
            except IndexError:
                bin_index = ILP_WEIGHTS['ilp_tables'].shape[1]
            if bin_index != 0:
                bin_index -= 1
            ilp_weights = ILP_WEIGHTS['ilp_tables'][:, bin_index]
            print('ILP Pred[%s] Weights[%s]' % (ilp_pred, ilp_weights))
            semantic_masks *= ilp_weights
        #min_probability = float(params.get('min_probability', 0.5))
        #semantic_masks = np.dstack([semantic_masks, np.ones_like(semantic_masks[:, :, 0]) * min_probability])
        texton_argmax2 = np.argmax(semantic_masks, 2)
        image_string = imfeat.image_tostring(COLORS_BGR[texton_argmax2], 'png')
        out = {'argmax_pngb64': base64.b64encode(image_string)}
        out['classes'] = CLASS_COLORS
        return out
    if function == 'see/colors':
        image = cv2.resize(image,
                           (320, int(image.shape[0] * 320. / image.shape[1])))
        image = np.ascontiguousarray(image)
        masks = COLOR_NAMING.make_feature_mask(image)
        mask_argmax = np.argmax(masks, 2)
        image_string = imfeat.image_tostring(
            COLOR_NAMING.color_values[mask_argmax], 'png')
        return {'argmax_pngb64': base64.b64encode(image_string)}
    if function == 'see/faces':
        results = [map(float, x) for x in FACES._detect_faces(image)]
        return {
            'faces': [{
                'tl_x': x[0],
                'tl_y': x[1],
                'width': x[2],
                'height': x[3]
            } for x in results]
        }
    return {}