Example 1
import json
import os
import pickle
import traceback

import cv2
from flask import Response, request
# Assumed imports; the original snippet omits them. `storage` (a storehouse
# StorageBackend) and FILESYSTEM are module-level configuration defined
# elsewhere in the original file.
from storehouse import RandomReadFile
from hwang import Decoder


def fetch():
    try:
        path = request.args.get('path')
        frame_ids = json.loads(request.args.get('frame'))

        if FILESYSTEM == 'local':
            # Local paths are resolved under the /host mount point.
            path = '/host' + path

        video_file = RandomReadFile(storage, path.encode('ascii'))
        video = Decoder(video_file)

        img_list = video.retrieve(frame_ids)
        img_list = [cv2.cvtColor(img, cv2.COLOR_RGB2BGR) for img in img_list]

        # Store the frames to disk.
        os.makedirs('/frameserver/tmp/', exist_ok=True)
        with open('/frameserver/tmp/test.pkl', 'wb') as fp:
            pickle.dump(img_list, fp)
        # Report success; format_exc() only carries a traceback inside an
        # except block, so it is not a useful response here.
        return Response('OK', mimetype='text/plain')

        # send imgs through http
        # img_list_encode = [cv2.imencode('.jpg', img)[1] for img in img_list]
        # import base64
        # with open('test.pkl', 'wb') as fp:
        #     data = pickle.dumps(img_list_encode)
        #     encoded = base64.b64encode(data)
        #     fp.write(encoded)
        # return send_file('test.pkl', as_attachment=True)

    except Exception:
        return Response(traceback.format_exc(), mimetype='text/plain')
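A minimal client sketch for this endpoint, assuming it is mounted at a /fetch route (the snippet does not show its @app.route decorator, so host, port, and route are all hypothetical). Note that frame is passed as a JSON-encoded list, matching the json.loads call above:

import requests

# Hypothetical host, port, and route.
resp = requests.get('http://localhost:7500/fetch',
                    params={'path': '/videos/example.mp4',
                            'frame': '[0, 30, 60]'})  # JSON-encoded frame ids
print(resp.text)  # 'OK' on success, a traceback on failure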
Example 2
# Assumed imports; the original snippet omits them.
import os

import cv2
from storehouse import StorageConfig, StorageBackend, RandomReadFile
from hwang import Decoder


def prepare_hairstyle(video_path,
                      face_list,
                      storage=None,
                      out_folder='/app/result/clothing/images/'):
    # Each entry of face_list is a (frame_id, face_id, identity_id, bbox)
    # tuple, with bbox coordinates normalized to [0, 1].
    fid_list = [face[0] for face in face_list]

    # Load frames from Google Cloud Storage if no backend was supplied.
    if storage is None:
        storage = StorageBackend.make_from_config(
            StorageConfig.make_gcs_config('esper'))
    video_file = RandomReadFile(storage, video_path.encode('ascii'))
    video = Decoder(video_file)
    img_list = video.retrieve(fid_list)
    # Decoded frames are RGB; convert to BGR for OpenCV.
    img_list = [cv2.cvtColor(img, cv2.COLOR_RGB2BGR) for img in img_list]

    H, W = img_list[0].shape[:2]
    for i, face in enumerate(face_list):
        (frame_id, face_id, identity_id, bbox) = face
        frame = img_list[i]
        # Scale the normalized bbox to pixel coordinates.
        x1 = int(bbox[0] * W)
        y1 = int(bbox[1] * H)
        x2 = int(bbox[2] * W)
        y2 = int(bbox[3] * H)
        # Expand to a square crop 1.5x the larger face dimension,
        # centered on the face and clamped to the frame bounds.
        w = max(y2 - y1, x2 - x1) * 3 // 4
        cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
        x1 = max(cx - w, 0)
        x2 = min(cx + w, W)
        y1 = max(cy - w, 0)
        y2 = min(cy + w, H)
        filename = '{}_{}.jpg'.format(identity_id, face_id)
        cv2.imwrite(os.path.join(out_folder, filename), frame[y1:y2, x1:x2])
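A hedged usage sketch for the function above; all concrete values are hypothetical, but the tuple layout and the normalized bbox coordinates follow what the function itself unpacks and scales:

# Hypothetical values: (frame_id, face_id, identity_id, bbox), with bbox
# given as fractions of the frame size.
face_list = [
    (120, 7, 42, (0.30, 0.25, 0.45, 0.55)),
    (360, 9, 42, (0.28, 0.22, 0.44, 0.52)),
]
# Uses the default GCS 'esper' bucket and output folder from the signature.
prepare_hairstyle('tvnews/videos/example.mp4', face_list)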
Example 3
import io
import traceback

import cv2
from flask import Response, request, send_file
# Assumed imports; the original snippet omits them. As in Example 1,
# `storage` and FILESYSTEM are module-level configuration.
from storehouse import RandomReadFile
from hwang import Decoder


def fetch():
    try:
        path = request.args.get('path')
        frame = int(request.args.get('frame'))
        scale = request.args.get('scale', None)
        height = request.args.get('height', None)

        if FILESYSTEM == 'local':
            path = '/host' + path

        video_file = RandomReadFile(storage, path.encode('ascii'))

        video = Decoder(video_file)
        img = video.retrieve([frame])[0]
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)

        [cur_height, cur_width, _] = img.shape
        target_width = None
        target_height = None
        # Either scale both dimensions by a factor, or fix the height and
        # derive the width so the aspect ratio is preserved.
        if scale is not None:
            scale = float(scale)
            target_width = cur_width * scale
            target_height = cur_height * scale
        elif height is not None:
            height = float(height)
            target_height = height
            target_width = target_height / cur_height * cur_width

        if target_width is not None:
            img = cv2.resize(img, (int(target_width), int(target_height)))

        return send_file(io.BytesIO(cv2.imencode('.jpg', img)[1]),
                         mimetype='image/jpeg')

    except Exception:
        return Response(traceback.format_exc(), mimetype='text/plain')
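As with the first example, the route decorator is not shown, so the client sketch below assumes a /fetch route on a hypothetical host. Passing scale=0.5 halves both dimensions; passing height=480 instead would fix the height and keep the aspect ratio:

import requests

# Hypothetical host, port, and route.
resp = requests.get('http://localhost:7500/fetch',
                    params={'path': '/videos/example.mp4',
                            'frame': 120,   # a single frame id this time
                            'scale': 0.5})
with open('frame_120.jpg', 'wb') as fp:
    fp.write(resp.content)  # JPEG bytes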
Example 4
    def _load_output_file(self, item_id, rows, fn=None):
        """Yield the requested `rows` from one stored output file, decoding
        each buffer with `fn` when given. `rows` must be non-empty and
        sorted ascending."""
        assert len(rows) > 0

        metadata_path = '{}/tables/{}/{}_{}_metadata.bin'.format(
            self._db_path, self._table._descriptor.id, self._descriptor.id,
            item_id)
        try:
            metadata_file = RandomReadFile(self._storage, metadata_path)
        except UserWarning:
            raise ScannerException(
                'Path {} does not exist'.format(metadata_path))

        data_path = '{}/tables/{}/{}_{}.bin'.format(self._db_path,
                                                    self._table._descriptor.id,
                                                    self._descriptor.id,
                                                    item_id)
        try:
            data_file = RandomReadFile(self._storage, data_path)
        except UserWarning:
            raise ScannerException(
                'Path {} does not exist'.format(data_path))

        # HACK: this should get eliminated once metadata format saves offsets instead of lengths
        last_row_edge_case = rows == [self._table._descriptor.end_rows[-1] - 1]
        if last_row_edge_case:
            size = metadata_file.size()
            metadata_file.seek(size - 8)
            (buf_len, ) = struct.unpack('=Q', metadata_file.read(8))
            data_file.seek(data_file.size() - buf_len)
            buf = data_file.read(buf_len)
            if len(buf) == 0:
                yield None
            elif fn is not None:
                yield fn(buf, self._db.protobufs)
            else:
                yield buf
            return

        # With only a few rows it is cheaper to seek to each buffer than to
        # read the whole data file into memory.
        sparse_load = len(rows) < LOAD_SPARSITY_THRESHOLD

        metadata_contents = metadata_file.read()
        if not sparse_load:
            data_contents = data_file.read()

        # Parse the metadata file: each block is a uint64 row count followed
        # by one uint64 buffer length per row.
        lens = []
        total_rows = 0
        i = 0
        while i < len(metadata_contents):
            (num_rows, ) = struct.unpack("=Q", metadata_contents[i:i + 8])
            total_rows += num_rows
            i += 8
            for fi in range(num_rows):
                (buf_len, ) = struct.unpack("=Q", metadata_contents[i:i + 8])
                lens.append(buf_len)
                i += 8

        # Walk every buffer in order, yielding only the requested rows;
        # `i` is the byte offset of buffer `j` in the data file (the first
        # buffer starts at offset 0).
        rows_idx = 0
        i = 0
        for j, buf_len in enumerate(lens):
            if rows_idx < len(rows) and j == rows[rows_idx]:
                if sparse_load:
                    data_file.seek(i)
                    buf = data_file.read(buf_len)
                else:
                    buf = data_contents[i:i + buf_len]

                # len(buf) == 0 when element is null
                if len(buf) == 0:
                    yield None
                elif fn is not None:
                    yield fn(buf, self._db.protobufs)
                else:
                    yield buf
                rows_idx += 1
            i += buf_len
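The metadata layout this method decodes is a flat sequence of uint64 values packed with '=Q' (standard size, native byte order): a row count, then one buffer length per row, with blocks repeated until end of file. A standalone round-trip sketch of that framing (file name hypothetical):

import struct

lens = [128, 0, 256]  # a zero length marks a null element, as in the loader

# Write one metadata block: row count, then one length per row.
with open('metadata.bin', 'wb') as fp:
    fp.write(struct.pack('=Q', len(lens)))
    for buf_len in lens:
        fp.write(struct.pack('=Q', buf_len))

# Parse it back the same way _load_output_file does.
with open('metadata.bin', 'rb') as fp:
    contents = fp.read()
parsed, i = [], 0
while i < len(contents):
    (num_rows,) = struct.unpack('=Q', contents[i:i + 8])
    i += 8
    for _ in range(num_rows):
        (buf_len,) = struct.unpack('=Q', contents[i:i + 8])
        parsed.append(buf_len)
        i += 8
assert parsed == lens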