Code example #1
def write(name, filename, resolution=None, codec=None, fps=None, budget=None):
    if not LogicalVideo.exists_by_name(name):
        with log_runtime('API: write'):
            logical = LogicalVideo.add(name)
            physical = PhysicalVideo.load(logical, filename, resolution, codec,
                                          fps)
            # Storage budget scales with size; encoded (defined elsewhere in
            # vfs) maps a codec to whether it is a compressed format
            logical.budget = physical.size() * (
                budget or (logical.DEFAULT_ENCODED_BUDGET_MULTIPLIER
                           if encoded[physical.codec] else
                           logical.DEFAULT_RAW_BUDGET_MULTIPLIER))
    else:
        print("ERROR: video already exists")
Code example #2
def reconstruct_gop(gop, temp_path, times, resolution, codec, roi, fps):
    with log_runtime(
            f'Reconstruct GOP {gop.video().id}.{gop.id} ({gop.video().width}x{gop.video().height}, {gop.video().codec}, t={gop.start_time:0.2f}-{gop.end_time:0.2f})'
    ):
        if not gop.joint:
            input_filename = gop.filename
        else:
            input_filename = path.join(
                temp_path, path.basename(gop.filename.format('original')))
            # Code to correct codec/resolution to skip transcode
            VFS.instance().compression.co_decompress(gop, input_filename)

        # Clamp the requested interval to this GOP, in GOP-relative seconds
        gop_times = (max(times[0] - gop.start_time, 0),
                     (min(gop.end_time, times[1]) -
                      gop.start_time)) if times else None

        # Transcode when resolution or codec differ, or when the request
        # covers a strict subset of the GOP; otherwise take the cache hit
        if (gop.video().resolution() != resolution
                or gop.video().codec != codec or
                #TODO (roi is not None and roi != (0, 0, *resolution)) or
            (gop_times is not None and
             0 < gop_times[0] < gop_times[1] < gop.end_time - gop.start_time)):
            if gop.zstandard:
                logging.debug("Reconstruction: decompressing raw GOP %d",
                              gop.id)
                vfs.rawcompression.decompress(gop)

            container = '.mp4' if encoded[codec] else ''
            resize_filename = path.join(
                temp_path,
                'resize-{}.{}{}'.format(path.basename(input_filename), codec,
                                        container))
            new_mse = vfs.videoio.reformat(
                input_filename,
                resize_filename,
                input_resolution=gop.video().resolution(),
                output_resolution=resolution,
                input_codec=gop.video().codec,
                output_codec=codec,
                input_fps=gop.fps,
                output_fps=fps,
                roi=roi,
                # gop_times is GOP-relative, so a full-GOP request is
                # (0, duration); pass None to skip trimming in that case
                times=gop_times if gop_times !=
                (0, gop.end_time - gop.start_time) else None)
            return resize_filename, os.path.getsize(resize_filename), (
                gop, resize_filename, new_mse)
        else:
            logging.info(f"Cache hit for GOP {gop.id}")
            return input_filename if gop.zstandard is None else compressed_filename(
                input_filename), gop.original_size, []
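
A quick numeric check of the interval clamp above, with invented values: a GOP spanning t=10-15 s and a request for t=12-20 s yields GOP-relative offsets (2, 5), trimming the head while keeping the tail.

# Invented numbers checking the GOP-relative clamp in reconstruct_gop
gop_start, gop_end = 10.0, 15.0
times = (12.0, 20.0)
gop_times = (max(times[0] - gop_start, 0), min(gop_end, times[1]) - gop_start)
assert gop_times == (2.0, 5.0)  # trim the first 2 s, keep through the GOP end
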
Code example #3
def read(name,
         filename=None,
         resolution=None,
         roi=None,
         t=None,
         codec=None,
         fps=None,
         child_process=False):
    # Sub-second reads are cheaper single-threaded; shrink the worker pool
    if t is not None and t[1] - t[0] < 1:
        vfs.reconstruction.POOL_SIZE = 1

    if LogicalVideo.exists_by_name(name):
        with log_runtime('API: read', level=logging.ERROR):
            logical = LogicalVideo.get_by_name(name)

            # Defaults: full duration, ROI size (or source resolution), H.264
            t = tuple(map(float, t)) if t is not None else (0, logical.duration())
            resolution = resolution or ((roi[2] - roi[0], roi[3] - roi[1])
                                        if roi else next(logical.videos()).resolution())
            codec = codec or H264

            gops = vfs.solver.solve(logical, resolution, roi, t, fps, codec)

            if filename is not None:
                vfs.reconstruction.reconstruct(filename, logical, gops,
                                               resolution, roi, t, fps, codec)
            else:
                filename = os.path.join(tempfile.gettempdir(),
                                        uuid.uuid4().hex + ".mp4")
                result_filename = vfs.reconstruction.reconstruct(
                    filename,
                    logical,
                    gops,
                    resolution,
                    roi,
                    t,
                    fps,
                    codec,
                    is_stream=True)
                return open(result_filename,
                            'rb') if not child_process else result_filename
    else:
        print("ERROR: video does not exist")
Code example #4
def temp():
    name = 'v'
    threshold = 50
    fps = 30

    with log_runtime('Search.VFS', level=logging.CRITICAL):
        with engine.VFS(transient=True):
            for start_time, confidence, y1, x1, y2, x2 in parse_index_file(
                    'index_cars_vfs.csv'):
                end_time = start_time + (1 / fps)
                api.read(name,
                         "foo.mp4",
                         t=(start_time, end_time),
                         roi=(y1, x1, y2, x2),
                         codec='rgb')
    exit(1)
Code example #5
    def closest_match(cls,
                      epoch,
                      gop,
                      matches_required=DEFAULT_MATCHES_REQUIRED,
                      radius=400):
        from vfs.gop import Gop

        with log_runtime("Joint compression candidate selection"):
            cluster_gops = VFS.instance().database.execute(
                "SELECT id, filename, descriptors FROM gops WHERE cluster_id = ? AND physical_id != ? AND descriptors IS NOT NULL AND joint = 0 AND examined <= ? AND NOT EXISTS (SELECT id FROM gop_joint_aborted WHERE gop1 = ? AND gop2 = id)",
                (gop.cluster_id, gop.physical_id, epoch, gop.id)).fetchall()
            candidate_gops = []
            for gop2_id, gop2_filename, gop2_descriptors in cluster_gops:
                success, matches = cls.adhoc_match(gop.descriptors,
                                                   gop2_descriptors)
                if success and len(matches) > matches_required:
                    candidate_gops.append(
                        (gop.filename.split('-')[-1] == gop2_filename.split(
                            '-')[-1], len(matches), gop2_id, matches))
                    if candidate_gops[-1][1] > 400 or candidate_gops[-1][0]:
                        # Break on a "good enough" match to try out
                        break
            candidate_gop = sorted(candidate_gops,
                                   reverse=True)[0] if candidate_gops else None
            return [(candidate_gop[2], candidate_gop[3])] if candidate_gop else []

        # NOTE: the remainder is an earlier radius-match implementation,
        # unreachable after the return above; retained for reference
        matcher = cls._get_matcher(epoch, gop)
        physical_map = cls._get_physical_map(gop.cluster_id).get(
            gop.physical_id, None)
        index_map = cls._get_index_map(gop.cluster_id)
        all_matches = matcher.radiusMatch(queryDescriptors=gop.descriptors,
                                          maxDistance=radius)
        good_matches = defaultdict(lambda: [])
        first_matches = {}
        complete = set()

        # For each frame/descriptor pair, find matches that pass Lowe's ratio test
        filtered_matches = (
            m for d in all_matches for m in d
            if index_map[m.imgIdx] > gop.id and m.imgIdx not in physical_map
            and (m.imgIdx, m.queryIdx) not in complete)
        with log_runtime("Lowes test"):
            for match in filtered_matches:
                # First match
                if (match.imgIdx, match.queryIdx) not in first_matches:
                    first_matches[match.imgIdx, match.queryIdx] = match
                # Second match
                else:
                    if (first_matches[match.imgIdx, match.queryIdx].distance
                            < cls.LOWE_THRESHOLD * match.distance):
                        good_matches[match.imgIdx].append(
                            first_matches[match.imgIdx, match.queryIdx])
                    del first_matches[match.imgIdx, match.queryIdx]
                    complete.add((match.imgIdx, match.queryIdx))

        # Some matches may not have a second match to apply Lowe's threshold on.
        # Check to see if we should have seen it and count it if so.
        for first_match in first_matches.values():
            if first_match.distance / cls.LOWE_THRESHOLD < radius:
                good_matches[first_match.imgIdx].append(first_match)

        # fetchall() yields one-element rows; unpack to bare ids, and pass
        # the query parameter as a tuple
        ignore_ids = {row[0] for row in VFS.instance().database.execute(
            'SELECT gop2 FROM gop_joint_aborted WHERE gop1 = ?',
            (gop.id,)).fetchall()}
        best_indexes = [
            index for index, matches in good_matches.items()
            if len(matches) >= matches_required
            and index_map[index] not in ignore_ids
        ]
        gops = Gop.get_all(index_map[index] for index in best_indexes)
        best = [(gop, good_matches[index])
                for gop, index in zip(gops, best_indexes)]
        best = sorted(best,
                      key=lambda pair: (pair[0].filename.split('-')[-1] ==
                                        gop.filename.split('-')[-1],
                                        len(pair[1])),
                      reverse=True)
        best = best[:len(best) // 20 + 1]  # Keep top 5%
        best = [(mgop.id, matches) for mgop, matches in best]

        return best

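For context, Lowe's ratio test as applied above keeps a match only when its best distance is well below the second-best one. A minimal numeric illustration; the threshold value is an assumption standing in for cls.LOWE_THRESHOLD:

# Invented distances illustrating the ratio test in closest_match
LOWE_THRESHOLD = 0.7
first, second = 110.0, 200.0  # best and second-best descriptor distances
keep = first < LOWE_THRESHOLD * second
assert keep  # 110 < 140, so the match survives
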
Code example #6
def delete(name):
    if LogicalVideo.exists_by_name(name):
        with log_runtime('API: delete'):
            LogicalVideo.delete(LogicalVideo.get_by_name(name))
    else:
        print("ERROR: video does not exist")
Code example #7
def list():
    # Shadows the builtin list(); callers invoke it as api.list()
    with log_runtime('API: list'):
        return [logical.name for logical in LogicalVideo.get_all()]
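
A usage sketch covering the list and delete entry points above; the engine context and video name are illustrative:

with engine.VFS(transient=True):
    # Hypothetical cleanup: enumerate stored videos, then delete one
    if 'v' in api.list():
        api.delete('v')
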
Code example #8
            # (Excerpt: descriptor-extraction and joint-compression benchmark;
            # mt and n are a runtime accumulator and count defined outside it)
            keypoints2, descriptors2 = Descriptor.create(frame2)
            engine.database.execute(
                'UPDATE gops SET keypoints = ?, descriptors = ? WHERE id = ?',
                (keypoints1, descriptors1, gop1.id))
            engine.database.execute(
                'UPDATE gops SET keypoints = ?, descriptors = ? WHERE id = ?',
                (keypoints2, descriptors2, gop2.id))
            gop1.keypoints, gop2.keypoints = keypoints1, keypoints2
            gop1.descriptors, gop2.descriptors = descriptors1, descriptors2

            # Homography
            success, matches = Descriptor.adhoc_match(descriptors1,
                                                      descriptors2)
            JointCompression.estimate_homography(gop1, gop2, matches)

            # Joint compression
            with log_runtime('Joint:') as t:
                JointCompression.co_compress(gop1,
                                             gop2,
                                             matches,
                                             abort_psnr_threshold=0,
                                             dryrun=True)

            mt += t.duration

        print(mt / n)
Code example #9
    def estimate_homography(cls, gop1, gop2, frame1=None, frame2=None, matches=None, fast=True):
        if matches is None:
            frame1 = frame1 if frame1 is not None else read_first_frame(gop1.filename)
            frame2 = frame2 if frame2 is not None else read_first_frame(gop2.filename)
            keypoints1, descriptors1 = Descriptor.create(frame1, fast=fast)
            keypoints2, descriptors2 = Descriptor.create(frame2, fast=fast)

            has_homography, matches = Descriptor.adhoc_match(descriptors1, descriptors2, fast=fast)
        else:
            keypoints1, keypoints2 = gop1.keypoints, gop2.keypoints
            has_homography = True

        with log_runtime('Homography:'):
            H, Hi = vfs.homography.project(keypoints1, keypoints2, matches)

        # Layout geometry derived from the homography: frame2's vertical
        # offset and the widths of the non-overlapping left/right strips
        frame2_overlap_yoffset = -int(round(np.dot([0, gop1.video().height, 1], Hi)[0]))
        frame1_left_width = roundeven(Hi.dot([0,0,1])[0])
        frame2_right_width = max(roundeven(gop2.video().width - H.dot([gop1.video().width, 0, 1])[0] / H.dot([gop1.video().width, 0, 1])[2]), 0)
        overlap_height = gop2.video().height + 2 * frame2_overlap_yoffset
        overlap_width = gop1.video().width - frame1_left_width

        # Degenerate layout: no usable left strip, signal the caller to abort
        if frame1_left_width < 0:
            return H, Hi, cls._create_image(height=0, width=0), cls._create_image(height=0, width=0), None, None, None, None, True, has_homography

        if overlap_width < 0:
            overlap_width = 0
        if overlap_height < 0:
            overlap_height = 0

        pretransform_points = np.float32([
            [frame1_left_width, 0],
            [gop1.video().width, 0],
            [frame1_left_width, gop1.video().height],
            [gop1.video().width, gop1.video().height]])
        posttransform_points = np.float32(
            [[0, 0 + frame2_overlap_yoffset],
             [gop1.video().width - frame1_left_width + 1, 0 + frame2_overlap_yoffset],
             [0, gop1.video().height + frame2_overlap_yoffset],
             [gop1.video().width - frame1_left_width + 1, gop1.video().height + frame2_overlap_yoffset]])

        transform = cv2.getPerspectiveTransform(pretransform_points, posttransform_points)
        inverse_transform = cv2.getPerspectiveTransform(posttransform_points, pretransform_points)
        # Compose the rectifying transform into both homographies
        H, Hi = H.dot(inverse_transform), transform.dot(Hi)

        logging.info('Frames left %d, right %d, overlap %d x %d' % (frame1_left_width, frame2_right_width, overlap_height, overlap_width))
        left = cls._create_image(gop1.video(), width=frame1_left_width)
        right = cls._create_image(gop2.video(), width=frame2_right_width)
        overlap = cls._create_image(height=overlap_height, width=overlap_width)
        overlap_subframe = cls._create_image(height=gop1.video().height, width=overlap_width, dtype=np.uint16)
        recovered_frame2 = np.empty(gop2.video().shape(), dtype=np.uint8)

        return H, Hi, left, right, overlap, overlap_subframe, recovered_frame2, frame2_overlap_yoffset, False, has_homography
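
As a self-contained illustration of the warp these shapes feed into (co_compress in code example #10 projects frame2 into the overlap buffer the same way), here is a toy call with invented sizes and an identity matrix standing in for a real homography:

import cv2
import numpy as np

# Toy warp: project one frame into another's coordinate system, as co_compress
# does with Hi and a preallocated overlap buffer (all values invented)
frame2 = np.zeros((1080, 1920, 3), dtype=np.uint8)
Hi = np.eye(3, dtype=np.float64)
overlap = np.empty((1080, 1920, 3), dtype=np.uint8)
cv2.warpPerspective(frame2, Hi, dsize=(1920, 1080), dst=overlap)
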
Code example #10
    def co_compress(cls, gop1, gop2, matches, abort_psnr_threshold=25, dryrun=False, gops_reversed=False):
        logging.info('Joint compress %s-%d-%d and %s-%d-%d (%d matches, %s, %s)',
                     gop1.video().logical().name, gop1.video().id, gop1.id,
                     gop2.video().logical().name, gop2.video().id, gop2.id,
                     len(matches),
                     gop1.filename, gop2.filename)

        assert(gop1.id != gop2.id)
        assert(gop1.video().codec == gop2.video().codec)
        assert(not gop1.joint)
        assert(not gop2.joint)

        H, Hi, left, right, overlap, overlap_subframe, recovered_frame2, frame2_overlap_yoffset, inverted_homography, has_homography = \
            cls.estimate_homography(gop1, gop2, matches=None, fast=False)  # TODO pass fast/matches through
        # Per-frame homography log; rows are [frame_index, *H.flatten()]
        Hs = np.hstack([[0], H.flatten()])

        if has_homography and inverted_homography and not gops_reversed:
            return cls.co_compress(gop2, gop1, matches, gops_reversed=True)
        # Are videos identical?
        elif not inverted_homography and left.shape[1] == 0 and right.shape[1] == 0:
            return cls.deduplicate(gop1, gop2)
        # Are left/right frames too small to encode?
        elif 0 <= left.shape[1] < 32 or 0 <= right.shape[1] < 32 or 0 <= overlap.shape[1] < 32:
            logging.info('Joint compression aborted; left/right/overlap frames too small (%d, %d)', gop1.id, gop2.id)
            VFS.instance().database.execute(
                'UPDATE gops SET examined=9999998 '  # 9999998 = possibly examine again?
                'WHERE id in (?, ?)',
                (gop1.id, gop2.id)).close()
            return 0
        else:

            filenametemplate = '{}-{{}}{}'.format(*path.splitext(gop1.filename))
            abort = False
            frame_index = 0
            total_left_psnr, total_right_psnr = 0, 0
            codec = gop1.video().codec

            with log_runtime('Joint compression:'):
                with VideoReader(gop1.filename, gop1.video().shape(), codec) as reader1, \
                     VideoReader(gop2.filename, gop2.video().shape(), codec) as reader2, \
                     VideoWriter(filenametemplate.format(cls.LEFT), left.shape, codec) \
                             if left.shape[1] else NullWriter() as leftwriter, \
                     VideoWriter(filenametemplate.format(cls.RIGHT), right.shape, codec) \
                             if right.shape[1] else NullWriter() as rightwriter, \
                     VideoWriter(filenametemplate.format(cls.OVERLAP), overlap.shape, codec) as overlapwriter:
                    while (not reader1.eof or not reader2.eof) and not abort:
                        attempts = 0
                        frame_index += 1
                        frame1, frame2 = reader1.read(), reader2.read()

                        while attempts < 2:
                            attempts += 1

                            if frame1 is not None and frame2 is not None:
                                pass
                            elif frame1 is not None and frame2 is None:
                                frame2 = np.zeros(gop2.video().shape(), dtype=np.uint8)
                            elif frame2 is not None and frame1 is None:
                                frame1 = np.zeros(gop1.video().shape(), dtype=np.uint8)

                            if frame1 is not None or frame2 is not None:
                                # Create and write overlap
                                cv2.warpPerspective(frame2, Hi, dsize=tuple(reversed(overlap.shape[:2])), dst=overlap)

                                # Left join: composite frame1's overlapping
                                # region on top of the warped frame2
                                cls.left_join(overlap[frame2_overlap_yoffset:frame2_overlap_yoffset + gop1.video().height], frame1[:, left.shape[1]:])

                                if left.shape[1] != 0:
                                    np.copyto(left, frame1[:, :left.shape[1]])
                                if right.shape[1] != 0:
                                    np.copyto(right, frame2[:, -right.shape[1]:])

                                right_psnr = cls.recovered_right_psnr(H, overlap, frame2, recovered_frame2, right)
                                left_psnr = cls.recovered_left_psnr(overlap[frame2_overlap_yoffset:frame2_overlap_yoffset + gop1.video().height], frame1, left)

                                if right_psnr < abort_psnr_threshold:
                                    if attempts == 2:
                                        abort = True
                                        break
                                    else:
                                        logging.debug(f"Recomputing homography ({gop1.id} <-> {gop2.id}) PSNR {right_psnr:0.1f}")
                                        #TODO save new homography
                                        H, Hi, left, right, overlap, overlap_subframe, recovered_frame2, frame2_overlap_yoffset, inverted_homography, has_homography = cls.estimate_homography(
                                            gop1, gop2, frame1=frame1, frame2=frame2, matches=None, fast=True)
                                        # Record the re-estimated homography for this frame
                                        Hs = np.vstack([Hs, np.hstack([[frame_index], H.flatten()])])
                                else:
                                    total_right_psnr += right_psnr
                                    total_left_psnr += left_psnr
                                    attempts = 999  # accept this frame; exit the retry loop

                                    leftwriter.write(left)
                                    rightwriter.write(right)
                                    overlapwriter.write(overlap)

            if abort:
                cv2.imwrite('abortoverlap.png', overlap)
                cv2.imwrite('abortrecovered1.png', np.hstack([left, overlap[frame2_overlap_yoffset:frame2_overlap_yoffset + gop1.video().height]]))
                cv2.imwrite('abortrecovered2.png', recovered_frame2)
                logging.info('Joint compression aborted; quality threshold violated %d < %d (%d vs %d)',
                             right_psnr, abort_psnr_threshold, gop1.id, gop2.id)
                os.remove(filenametemplate.format(cls.LEFT))
                os.remove(filenametemplate.format(cls.OVERLAP))
                os.remove(filenametemplate.format(cls.RIGHT))
                VFS.instance().database.executebatch([
                    f'INSERT INTO gop_joint_aborted(gop1, gop2) VALUES ({gop1.id}, {gop2.id})',
                    f'INSERT INTO gop_joint_aborted(gop1, gop2) VALUES ({gop2.id}, {gop1.id})',
                    f'UPDATE gops SET examined=examined + 1 WHERE id in ({gop1.id}, {gop2.id})'])
                return 0
            elif not dryrun:
                original_size = path.getsize(gop1.filename) + path.getsize(gop2.filename)

                VFS.instance().database.execute(
                    'UPDATE gops SET examined=9999999, joint=1, original_filename=filename, filename=?, homography=?, shapes=?, is_left=(id=?) '
                    'WHERE id in (?, ?)',
                    (filenametemplate, np.vstack(Hs), np.vstack([left.shape, overlap.shape, right.shape]),
                     gop1.id, gop1.id, gop2.id)).close()

                os.remove(gop1.filename)
                os.remove(gop2.filename)

                bytes_saved = (original_size -
                               (path.getsize(filenametemplate.format(cls.LEFT)) +
                                path.getsize(filenametemplate.format(cls.OVERLAP)) +
                                path.getsize(filenametemplate.format(cls.RIGHT))))
                logging.info('Joint compression saved %dKB (%d%%), %d frames, PSNR left=%d, right=%d',
                             bytes_saved // 1000, (bytes_saved * 100) // original_size, frame_index - 1,
                             total_left_psnr // (frame_index - 1), total_right_psnr // (frame_index - 1))
                with open('joint.csv', 'a') as f:
                    f.write(f'{gop1.id},{gop2.id},{frame_index-1},{total_right_psnr // (frame_index-1)},{total_left_psnr // (frame_index-1)}\n')
                return bytes_saved
            else:
                return 0
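
The per-frame retry policy above allows one homography re-estimate: a first PSNR miss triggers re-estimation, and a second consecutive miss aborts joint compression. A schematic check with invented values:

# Invented PSNR values tracing the two-attempt policy in co_compress
abort_psnr_threshold = 25
psnrs = [18.0, 21.0]  # after the initial estimate, then after re-estimation
abort = all(p < abort_psnr_threshold for p in psnrs)
assert abort  # both attempts fell below the threshold, so the pair is aborted
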
Code example #11
def solve(logical, resolution, roi, t, fps, codec):
    with log_runtime('Solver'):
        # Prefer an exact materialized plan; fall back to the constraint solver
        return solve_exact(logical, resolution, roi, t, fps, codec) or \
               solve_constraint(logical, resolution, roi, t, fps, codec)
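
A sketch of the fallback contract solve() relies on, using hypothetical stubs: solve_exact returns a falsy result when no exact materialized plan covers the request, so the or-chain falls through to the constraint-based solver.

# Hypothetical stubs illustrating the short-circuit fallback in solve()
def solve_exact(*args):
    return None  # no exact materialized plan available

def solve_constraint(*args):
    return ['gop-plan']  # the constraint solver always proposes a plan

assert (solve_exact() or solve_constraint()) == ['gop-plan']
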
Code example #12
    fps = 30
    query_interval = 3
    target_color = np.array([127, 127, 127])
    color_threshold = 50
    resolution = (1080, 1920)

    with engine.VFS(transient=True) as instance:
        if 'v' not in api.list():
            api.write("v", ingest_filename)

    with ProcessPoolExecutor(max_workers=clients) as pool:
        # The Index/Search phases (VFS and raw-filesystem variants) run the
        # same way; only the streaming phase is exercised here
        with log_runtime('Stream.VFS', level=logging.CRITICAL):
            stream_vfs('v', resolution, fps, 'index_colors_vfs.csv')