Example 1
def test_overlapping_slice(cl):
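    # Slice the input into three overlapping frame ranges, sample a
    # sub-range from each slice, then unslice back into a single stream.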
    input = NamedVideoStream(cl, 'test1')
    frame = cl.io.Input([input])
    slice_frame = cl.streams.Slice(frame,
                                   partitions=[
                                       cl.partitioner.strided_ranges(
                                           [(0, 15), (5, 25), (15, 35)], 1)
                                   ])
    sample_frame = cl.streams.Range(slice_frame,
                                    ranges=[
                                        SliceList([
                                            {
                                                'start': 0,
                                                'end': 10
                                            },
                                            {
                                                'start': 5,
                                                'end': 15
                                            },
                                            {
                                                'start': 5,
                                                'end': 15
                                            },
                                        ])
                                    ])
    unsliced_frame = cl.streams.Unslice(sample_frame)
    output = NamedStream(cl, 'test_slicing')
    output_op = cl.io.Output(unsliced_frame, [output])
    cl.run(output_op,
           PerfParams.estimate(),
           cache_mode=CacheMode.Overwrite,
           show_progress=False)
    assert output.len() == 30
Example 2
def test_bind_op_args(cl):
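    # Run one op graph over two copies of the same input, binding a
    # different (x, y) argument pair to each job.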
    input = NamedVideoStream(cl, 'test1')
    frame = cl.io.Input([input, input])
    range_frame = cl.streams.Range(frame,
                                   ranges=[{
                                       'start': 0,
                                       'end': 1
                                   } for _ in range(2)])
    test_out = cl.ops.TestPy(frame=range_frame,
                             kernel_arg=1,
                             x=[1, 10],
                             y=[5, 50])
    outputs = [NamedStream(cl, 'test_hist_0'), NamedStream(cl, 'test_hist_1')]
    output_op = cl.io.Output(test_out, outputs)
    pairs = [(1, 5), (10, 50)]
    cl.run(output_op,
           PerfParams.estimate(),
           cache_mode=CacheMode.Overwrite,
           show_progress=False)

    for i, (x, y) in enumerate(pairs):
        values = list(outputs[i].load())
        p = values[0]
        assert p['x'] == x
        assert p['y'] == y
Example 3
def test_python_stencil_batch_kernel(cl):
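    # Exercise a Python kernel that both stencils and batches, with a
    # batch size (50) larger than the sampled 30-frame range.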
    input = NamedVideoStream(cl, 'test1')
    frame = cl.io.Input([input])
    range_frame = cl.streams.Range(frame, ranges=[{'start': 0, 'end': 30}])
    test_out = cl.ops.TestPyStencilBatch(frame=range_frame, batch=50)
    output = NamedStream(cl, 'test_hist')
    output_op = cl.io.Output(test_out, [output])
    cl.run(output_op,
           PerfParams.estimate(),
           cache_mode=CacheMode.Overwrite,
           show_progress=False)
    next(output.load())
Example 4
def test_slice(cl):
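    # Slice into 50-frame partitions and immediately unslice; the output
    # should have the same length as the input.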
    input = NamedVideoStream(cl, 'test1')
    frame = cl.io.Input([input])
    slice_frame = cl.streams.Slice(frame, partitions=[cl.partitioner.all(50)])
    unsliced_frame = cl.streams.Unslice(slice_frame)
    output = NamedStream(cl, 'test_slicing')
    output_op = cl.io.Output(unsliced_frame, [output])
    cl.run(output_op,
           PerfParams.estimate(),
           cache_mode=CacheMode.Overwrite,
           show_progress=False)
    assert input.len() == output.len()
Example 5
    def run(self, cl, device):
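        # Compute a per-frame histogram on the given device and write it
        # to a named stream.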
        input = NamedVideoStream(cl, 'test1_inplace')
        frame = cl.io.Input([input])
        hist = cl.ops.Histogram(frame=frame, device=device)
        output = NamedStream(cl, 'test_hist')
        output_op = cl.io.Output(hist, [output])

        cl.run(output_op,
               PerfParams.estimate(),
               cache_mode=CacheMode.Overwrite,
               show_progress=False)
        next(output.load())
Example 6
def test_unbounded_state(cl):
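    # An op with unbounded state has its state reset at slice boundaries;
    # slicing into 50-frame partitions exercises that.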
    input = NamedVideoStream(cl, 'test1')
    frame = cl.io.Input([input])
    slice_frame = cl.streams.Slice(frame, partitions=[cl.partitioner.all(50)])
    increment = cl.ops.TestIncrementUnbounded(ignore=slice_frame)
    unsliced_increment = cl.streams.Unslice(increment)
    output = NamedStream(cl, 'test_unbounded_state')
    output_op = cl.io.Output(unsliced_increment, [output])
    cl.run(output_op,
           PerfParams.estimate(),
           cache_mode=CacheMode.Overwrite,
           show_progress=False)
    assert output.len() == input.len()
Example 7
def test_wider_than_packet_stencil(cl):
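    # The [0, 1] stencil is wider than the one-frame work and IO packets
    # forced by PerfParams.manual(1, 1).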
    input = NamedVideoStream(cl, 'test1')
    frame = cl.io.Input([input])
    sample_frame = cl.streams.Range(frame, ranges=[{'start': 0, 'end': 3}])
    flow = cl.ops.OpticalFlow(frame=sample_frame, stencil=[0, 1])
    output = NamedStream(cl, 'test_stencil')
    output_op = cl.io.Output(flow, [output])

    cl.run(output_op,
           PerfParams.manual(1, 1, pipeline_instances_per_node=1),
           cache_mode=CacheMode.Overwrite,
           show_progress=False)

    assert output.len() == 3
Example 8
def get_maskrcnn_by_fid(sc, video_name, fid):
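    # Load the Mask R-CNN detections for one frame and keep the two widest
    # 'person' boxes; the box lower in the frame (larger y1) becomes the
    # foreground mask.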
    maskrcnn_stream = NamedStream(sc, video_name + '_maskrcnn')
    seq = sc.sequence(maskrcnn_stream._name)
    obj = seq.load(workers=1, rows=[fid])
    metadata = next(obj)

    if len(metadata) < 2:
        print(fid, len(metadata))
        return None, None
    PERSON_CATEGORY = maskrcnn_detection.CATEGORIES.index('person')
    # Filter the two players by bbox width (x2 - x1).
    bbox_size_list = []
    for m in metadata:
        if int(m['label']) == PERSON_CATEGORY and m['score'] > 0.9:
            bbox_size_list += [m['bbox']['x2'] - m['bbox']['x1']]
        else:
            bbox_size_list += [0]

    top2 = np.argsort(bbox_size_list)[-2:]
    playerA, playerB = metadata[top2[0]], metadata[top2[1]]
    if playerA['bbox']['y1'] > playerB['bbox']['y1']:
        mask_fg = playerA['mask']
        mask_bg = playerB['mask']
    else:
        mask_fg = playerB['mask']
        mask_bg = playerA['mask']
    return mask_fg, mask_bg
Example 9
def test_slice_args(cl):
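    # Bind a distinct per-slice argument (0, 1, 2) to an op running over
    # three one-frame slices.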
    frame = cl.io.Input([NamedVideoStream(cl, 'test1')])
    slice_frame = cl.streams.Slice(
        frame, [cl.partitioner.ranges([[0, 1], [1, 2], [2, 3]])])
    test = cl.ops.TestSliceArgs(frame=slice_frame,
                                arg=[SliceList([i for i in range(3)])])
    unsliced_frame = cl.streams.Unslice(test)
    output = NamedStream(cl, 'test_slicing')
    output_op = cl.io.Output(unsliced_frame, [output])
    cl.run(output_op,
           PerfParams.estimate(),
           cache_mode=CacheMode.Overwrite,
           show_progress=False)

    list(output.load())
Example 10
def run_spacer_job(spacer, spacing):
    # Helper nested inside a test; closes over the outer Scanner client `cl`.
    frame = cl.io.Input([NamedVideoStream(cl, 'test1')])
    hist = cl.ops.Histogram(frame=frame)
    space_hist = spacer(input=hist, spacings=[spacing])
    output = NamedStream(cl, 'test_space')
    output_op = cl.io.Output(space_hist, [output])
    cl.run(output_op,
           PerfParams.estimate(),
           cache_mode=CacheMode.Overwrite,
           show_progress=False)
    return output
Example 11
def test_bounded_state(cl):
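    # With bounded state, Scanner only replays `warmup` frames of history
    # before each gathered index; consecutive indices keep accumulating.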
    warmup = 3

    frame = cl.io.Input([NamedVideoStream(cl, 'test1')])
    increment = cl.ops.TestIncrementBounded(ignore=frame, bounded_state=warmup)
    sampled_increment = cl.streams.Gather(increment,
                                          indices=[[0, 10, 25, 26, 27]])
    output = NamedStream(cl, 'test_bounded_state')
    output_op = cl.io.Output(sampled_increment, [output])
    cl.run(output_op,
           PerfParams.estimate(),
           cache_mode=CacheMode.Overwrite,
           show_progress=False)

    num_rows = 0
    expected_output = [0, warmup, warmup, warmup + 1, warmup + 2]
    for buf in output.load():
        (val, ) = struct.unpack('=q', buf)
        assert val == expected_output[num_rows]
        num_rows += 1
    assert num_rows == 5
Example 12
def get_densepose_by_fid(sc, video_name, fid):
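    # Load the DensePose detections for one frame, keep the two largest
    # boxes by area, and use left-shoulder height to pick the foreground
    # and background players.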
    def fid2idx(fid):
        return frame_ids_dict[video_name].index(fid)

    densepose_stream = NamedStream(sc, video_name[:-4] + '_densepose')
    seq = sc.sequence(densepose_stream._name)
    obj = seq.load(workers=1, rows=[fid2idx(fid)])
    metadata = next(obj)

    assert len(metadata) >= 2, "Player not detected!"

    # An alternative is to filter the two players by shoulder distance;
    # the bbox-area filter below is used instead.

    # Filter the two players by bbox area.
    bbox_area = []
    for person in metadata:
        area = (person['bbox'][2] - person['bbox'][0]) * (person['bbox'][3] -
                                                          person['bbox'][1])
        bbox_area.append(area)
    top2 = np.argsort(bbox_area)[-2:]
    personA = Person(metadata[top2[0]]['bbox'],
                     metadata[top2[0]]['keyp'],
                     metadata[top2[0]]['mask'],
                     score=metadata[top2[0]]['score'])
    personB = Person(metadata[top2[1]]['bbox'],
                     metadata[top2[1]]['keyp'],
                     metadata[top2[1]]['mask'],
                     score=metadata[top2[1]]['score'])
    if personA.keyp[1, Person.LShoulder] >= personB.keyp[1, Person.LShoulder]:
        person_fg = personA
        person_bg = personB
    else:
        person_fg = personB
        person_bg = personA

    return person_fg, person_bg
Example 13
def test_auto_ingest(cl):
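    # Passing path= to NamedVideoStream ingests the video automatically
    # before the job runs.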
    (vid1_path, vid2_path) = download_videos()
    input = NamedVideoStream(cl, 'test3', path=vid1_path)
    frame = cl.io.Input([input])
    hist = cl.ops.Histogram(frame=frame)
    output = NamedStream(cl, 'test_hist')
    output_op = cl.io.Output(hist, [output])
    cl.run(output_op,
           PerfParams.estimate(),
           cache_mode=CacheMode.Overwrite,
           show_progress=False)

    run(['rm', '-rf', vid1_path, vid2_path])
Example 14
def test_fetch_resources(cl):
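    # An op can declare a file resource (path=...); Scanner must make it
    # available to both pipeline instances.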
    with tempfile.NamedTemporaryFile() as f:
        f.write(b'0')
        f.flush()

        input = NamedVideoStream(cl, 'test1')
        frame = cl.io.Input([input])
        range_frame = cl.streams.Range(frame, ranges=[{'start': 0, 'end': 3}])
        test_out = cl.ops.ResourceTest(frame=range_frame, path=f.name)
        output = NamedStream(cl, 'test_hist')
        output_op = cl.io.Output(test_out, [output])
        cl.run(output_op,
               PerfParams.estimate(pipeline_instances_per_node=2),
               cache_mode=CacheMode.Overwrite,
               show_progress=False)
Example 15
def test_perf_params(cl):
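    # The same job should run under both manually specified and estimated
    # performance parameters.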
    frame = cl.io.Input([NamedVideoStream(cl, 'test1')])
    hist = cl.ops.Histogram(frame=frame)
    ghist = cl.streams.Gather(hist, [[0]])
    output_op = cl.io.Output(ghist, [NamedStream(cl, '_ignore')])

    cl.run(output_op,
           PerfParams.manual(10, 10),
           show_progress=False,
           cache_mode=CacheMode.Overwrite)

    cl.run(output_op,
           PerfParams.estimate(),
           show_progress=False,
           cache_mode=CacheMode.Overwrite)
Example 16
def test_no_workers(no_workers_cl):
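    # Running a job with no workers attached should raise ScannerException.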
    cl = no_workers_cl

    input = NamedVideoStream(cl, 'test1')
    frame = cl.io.Input([input])
    hist = cl.ops.Histogram(frame=frame)
    output_op = cl.io.Output(hist, [NamedStream(cl, '_ignore')])

    exc = False
    try:
        cl.run(output_op,
               PerfParams.estimate(),
               show_progress=False,
               cache_mode=CacheMode.Overwrite)
    except ScannerException:
        exc = True

    assert exc
Example 17
def test_profiler(cl):
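    # Run a small job, then fetch its profile, write a trace file, and
    # compute summary statistics.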
    frame = cl.io.Input([NamedVideoStream(cl, 'test1')])
    hist = cl.ops.Histogram(frame=frame)
    ghist = cl.streams.Gather(hist, [[0]])
    output_op = cl.io.Output(ghist, [NamedStream(cl, '_ignore')])

    time_start = time.time()
    job_id = cl.run(output_op,
                    PerfParams.estimate(),
                    show_progress=False,
                    cache_mode=CacheMode.Overwrite)
    print('Time', time.time() - time_start)
    profile = cl.get_profile(job_id)
    f = tempfile.NamedTemporaryFile(delete=False, suffix='.trace')
    f.close()
    profile.write_trace(f.name)
    profile.statistics()
    run(['rm', '-f', f.name])
Example 18
def test_pose(sc):
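    # Run OpenPose (body, hands, and face) on every 100th frame of the
    # first 1000 frames.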
    vid = [NamedVideoStream(sc, 'test1')]
    frame = sc.io.Input(vid)
    frame_sample = sc.streams.Gather(frame, [list(range(0, 1000, 100))])
    pose = sc.ops.OpenPose(
        frame=frame_sample,
        device=DeviceType.GPU,
        pose_num_scales=6,
        pose_scale_gap=0.16,
        compute_hands=True,
        hand_num_scales=6,
        hand_scale_gap=0.16,
        compute_face=True,
        batch=5
    )
    output = NamedStream(sc, 'test1-pose')
    output_op = sc.io.Output(pose, [output])

    sc.run(output_op, PerfParams.estimate())
Example 19
def main():
    print("Prepare videos and frames")
    video = Video.objects.filter(path__contains='men_single_final_gold')[0]
    movie_path = video.path
    movie_name = video.item_name()

    sc = Client()
    stride = 1
    input_stream = NamedVideoStream(sc, movie_name, path=movie_path)
    frame = sc.io.Input([input_stream])
    strided_frame = sc.streams.Stride(frame, [stride])

    print('Running Scanner MaskRCNN op')
    maskrcnn_frame = sc.ops.MaskRCNNDetectObjects(
        frame=strided_frame,
        device=DeviceType.GPU,  # use DeviceType.CPU if no GPU is available
        batch=8,
        confidence_threshold=0.5,
        min_image_size=800)
    maskrcnn_stream = NamedStream(sc, movie_name + '_maskrcnn')
    output_op = sc.io.Output(maskrcnn_frame, [maskrcnn_stream])
    sc.run(output_op,
           scannerpy.common.PerfParams.estimate(),
           cache_mode=scannerpy.CacheMode.Ignore)

    print('Writing MaskRCNN metadata into frames')
    drawn_frame = sc.ops.DrawMaskRCNN(frame=strided_frame,
                                      bundled_data=sc.io.Input(
                                          [maskrcnn_stream]),
                                      min_score_thresh=0.5)
    drawn_stream = NamedVideoStream(sc, movie_name + '_maskrcnn_draw')
    output_op = sc.io.Output(drawn_frame, [drawn_stream])
    sc.run(output_op,
           scannerpy.common.PerfParams.estimate(),
           cache_mode=scannerpy.CacheMode.Overwrite)
    drawn_stream.save_mp4('/app/result/' + movie_name + '_maskrcnn')
Example 20
    def load(self, ty=None, fn=None, rows=None, workers=16):
        """
        Loads the results of a Scanner computation into Python.

        Kwargs:
            fn: Optional function to apply to the binary blobs as they are read
                in.

        Returns:
            Generator that yields either a numpy array for frame columns or
            a binary blob for non-frame columns (optionally processed by the
            `fn`).
        """

        self._load_meta()
        # If the column is a video, then dump the requested frames to disk as
        # PNGs and return the decoded PNGs
        if (self._descriptor.type == protobufs.Video
                and self._video_descriptor.codec_type ==
                protobufs.VideoDescriptor.H264):
            png_table_name = self._sc._png_dump_prefix.format(
                self._table.name(), self._name)
            frame = self._sc.io.Input([NamedVideoStream(self._sc, self._table.name())])
            enc_input = frame
            if rows is not None:
                sampled_frame = self._sc.streams.Gather(frame, indices=[rows])
                enc_input = sampled_frame
            img = self._sc.ops.ImageEncoder(frame=enc_input)
            output = [NamedStream(self._sc, png_table_name)]
            output_op = self._sc.io.Output(img, output)
            self._sc.run(output_op, PerfParams.estimate(), cache_mode=CacheMode.Overwrite, show_progress=False)
            return output[0].load()
        elif self._descriptor.type == protobufs.Video:
            frame_type = self._video_descriptor.frame_type
            if frame_type == protobufs.U8:
                dtype = np.uint8
            elif frame_type == protobufs.F32:
                dtype = np.float32
            elif frame_type == protobufs.F64:
                dtype = np.float64
            else:
                raise ValueError(
                    'unsupported frame type: {}'.format(frame_type))

            def raw_frame_gen(shape0, shape1, shape2, typ):
                def parser(bufs):
                    output = np.frombuffer(bufs, dtype=typ)
                    return output.reshape((shape0, shape1, shape2))

                return parser

            parser_fn = raw_frame_gen(
                self._video_descriptor.height, self._video_descriptor.width,
                self._video_descriptor.channels, dtype)
            return self._load(fn=parser_fn, rows=rows, workers=workers)
        else:
            # Use a deserialize function if provided.
            # If not, use a type if provided.
            # If not, attempt to determine the type from the column's table descriptor.
            # If that doesn't work, then assume no deserialization function, and return bytes.
            if fn is None:
                if ty is None:
                    type_name = self._descriptor.type_name
                    if type_name != "":
                        ty = scannertypes.get_type_info_cpp(type_name)

                if ty is not None:
                    fn = ty.deserialize


            return self._load(fn, rows=rows, workers=workers)
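A minimal usage sketch for this method, assuming a connected Client `cl` and a stream 'test_hist' written by an earlier histogram job (both names are hypothetical):

hists = list(NamedStream(cl, 'test_hist').load())
first = hists[0]  # numpy array for a frame column, deserialized value or bytes otherwise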
Example 21
def main():

    print("Prepare videos and frames")
    video = Video.objects.filter(path__contains='wim')[0]
    video_path = video.path
    video_name = video.item_name()

    sc = Client()
    stride = 1
    input_stream = NamedVideoStream(sc, video_name, path=video_path)
    frame = sc.io.Input([input_stream])

    # run on all frames
    # running_frame = sc.streams.Stride(frame, [stride])

    with open('/app/data/pkl/hit_annotation_tennis.pkl', 'rb') as f:
        hit_annotation = pickle.load(f)[video_name + '.mp4']
    # Run on selected frames only.
    hit_dict = hit_annotation

    # Expand each annotated hit into a window from 25 frames before its
    # start to 25 frames after its end.
    frame_ids = [
        i for point in hit_dict for i in range(point[0] - 25, point[-1] + 25)
    ]

    frame_ids.sort()
    running_frame = sc.streams.Gather(frame, [frame_ids])

    print('Running Scanner DensePose op on %d frames' % (len(frame_ids)))
    densepose_frame = sc.ops.DensePoseDetectPerson(frame=running_frame,
                                                   device=DeviceType.GPU,
                                                   batch=1,
                                                   confidence_threshold=0.5,
                                                   nms_threshold=0.2)
    densepose_stream = NamedStream(sc, video_name + '_densepose')
    output_op = sc.io.Output(densepose_frame, [densepose_stream])
    sc.run(output_op,
           scannerpy.common.PerfParams.estimate(),
           cache_mode=scannerpy.CacheMode.Overwrite)

    print('Writing DensePose metadata into frames')
    drawn_frame = sc.ops.DrawDensePose(frame=running_frame,
                                       bundled_data=sc.io.Input(
                                           [densepose_stream]),
                                       min_score_thresh=0.5,
                                       show_body=True)
    drawn_stream = NamedVideoStream(sc, video_name + '_densepose_draw_uvbody')
    output_op = sc.io.Output(drawn_frame, [drawn_stream])
    sc.run(output_op,
           scannerpy.common.PerfParams.estimate(),
           cache_mode=scannerpy.CacheMode.Overwrite)
    drawn_stream.save_mp4('/app/result/' + video_name + '_densepose_uvbody')

    drawn_frame = sc.ops.DrawDensePose(frame=running_frame,
                                       bundled_data=sc.io.Input(
                                           [densepose_stream]),
                                       min_score_thresh=0.5,
                                       show_body=False)
    drawn_stream = NamedVideoStream(sc, video_name + '_densepose_draw_full')
    output_op = sc.io.Output(drawn_frame, [drawn_stream])
    sc.run(output_op,
           scannerpy.common.PerfParams.estimate(),
           cache_mode=scannerpy.CacheMode.Overwrite)
    drawn_stream.save_mp4('/app/result/' + video_name + '_densepose_full')
Example 22
def test_fault_tolerance(fault_cl):
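    # A background process force-kills a worker mid-job to trigger fault
    # tolerance, then respawns a fresh worker on another port.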
    force_kill_spawn_port = 5012
    normal_spawn_port = 5013

    def worker_killer_task(config, master_address):
        from scannerpy import Config, start_worker, protobufs
        import time
        import grpc
        import subprocess
        import signal
        import os

        import scanner.metadata_pb2 as metadata_types
        import scanner.engine.rpc_pb2 as rpc_types
        import scanner.types_pb2 as misc_types

        # Spawn a worker that we will force kill
        script_dir = os.path.dirname(os.path.realpath(__file__))
        with open(os.devnull, 'w') as fp:
            p = subprocess.Popen([
                'python3 ' + script_dir +
                '/spawn_worker.py {:d}'.format(force_kill_spawn_port)
            ],
                                 shell=True,
                                 stdout=fp,
                                 stderr=fp,
                                 preexec_fn=os.setsid)

            # Wait a bit for the worker to do its thing
            time.sleep(10)

            # Force kill worker process to trigger fault tolerance
            os.killpg(os.getpgid(p.pid), signal.SIGTERM)
            p.kill()
            p.communicate()

            # Wait for fault tolerance to kick in
            time.sleep(15)

            # Spawn the worker again
            subprocess.call([
                'python3 ' + script_dir +
                '/spawn_worker.py {:d}'.format(normal_spawn_port)
            ],
                            shell=True)

    master_addr = fault_cl._master_address
    killer_process = Process(target=worker_killer_task,
                             args=(fault_cl.config, master_addr))
    killer_process.daemon = True
    killer_process.start()

    input = NamedVideoStream(fault_cl, 'test1')
    frame = fault_cl.io.Input([input])
    range_frame = fault_cl.streams.Range(frame,
                                         ranges=[{
                                             'start': 0,
                                             'end': 20
                                         }])
    sleep_frame = fault_cl.ops.SleepFrame(ignore=range_frame)
    output = NamedStream(fault_cl, 'test_fault')
    output_op = fault_cl.io.Output(sleep_frame, [output])

    fault_cl.run(output_op,
                 PerfParams.estimate(pipeline_instances_per_node=1),
                 cache_mode=CacheMode.Overwrite,
                 show_progress=False)

    assert output.len() == 20

    # Shutdown the spawned worker
    channel = grpc.insecure_channel('localhost:' + str(normal_spawn_port),
                                    options=[('grpc.max_message_length',
                                              24499183 * 2)])
    worker = protobufs.WorkerStub(channel)

    try:
        worker.Shutdown(protobufs.Empty())
    except grpc.RpcError as e:
        status = e.code()
        if status == grpc.StatusCode.UNAVAILABLE:
            print('could not shutdown worker!')
            exit(1)
        else:
            raise ScannerException(
                'Worker errored with status: {}'.format(status))
    killer_process.join()