Example #1
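
The snippet below calls two ops, resize_fn and ResizeClass, which the original tutorial registers earlier in the same file. A minimal sketch of those registrations, assuming OpenCV (the bodies are an assumption; the argument-passing patterns mirror the op-attribute examples later in this listing):

import cv2
import scannerpy as sp
import util  # tutorial-local helper module (downloads the sample video)

@sp.register_python_op()
def resize_fn(config, frame: sp.FrameType) -> sp.FrameType:
    # Function-style op: per-op arguments arrive via config.args.
    return cv2.resize(frame, (config.args['width'], config.args['height']))

@sp.register_python_op()
class ResizeClass(sp.Kernel):
    # Class-style op: per-op arguments arrive as constructor parameters.
    def __init__(self, config, width, height):
        self._width = width
        self._height = height

    def execute(self, frame: sp.FrameType) -> sp.FrameType:
        return cv2.resize(frame, (self._width, self._height))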
def main():
    # Now we can use these new Ops in Scanner:
    sc = sp.Client()

    # Download an example video
    example_video_path = util.download_video()

    # Create a stream and input to read our example video
    video_stream = sp.NamedVideoStream(sc, 'example', path=example_video_path)
    frames = sc.io.Input([video_stream])

    resized_fn_frames = sc.ops.resize_fn(frame=frames, width=640, height=480)

    resized_class_frames = sc.ops.ResizeClass(frame=frames,
                                              width=320,
                                              height=240)

    fn_stream = sp.NamedVideoStream(sc, 'fn_frames')
    fn_output = sc.io.Output(resized_fn_frames, [fn_stream])

    class_stream = sp.NamedVideoStream(sc, 'class_frames')
    class_output = sc.io.Output(resized_class_frames, [class_stream])

    sc.run([fn_output, class_output], sp.PerfParams.estimate())

    fn_stream.save_mp4('01_resized_fn')
    class_stream.save_mp4('01_resized_class')

    for stream in [fn_stream, class_stream]:
        stream.delete(sc)

    print('Finished! Two videos were saved to the current directory: '
          '01_resized_fn.mp4, 01_resized_class.mp4')
Example #2

import scannerpy as sp
def main():
    sc = sp.Client()

    # Create a stored stream to represent the input video
    input_stream = sp.NamedVideoStream(sc,
                                       'sample-clip',
                                       path='sample-clip.mp4')

    # Define a Computation Graph
    frames = sc.io.Input([input_stream])
    sampled_frames = sc.streams.Stride(frames, [2])  # Select every other frame
    resized_frames = sc.ops.Resize(frame=sampled_frames,
                                   width=[640],
                                   height=[480])  # Resize input frame
    grayscale_frames = sc.ops.ConvertColor(frame=resized_frames,
                                           conversion=['COLOR_RGB2GRAY'])
    grayscale3_frames = sc.ops.CloneChannels(frame=grayscale_frames,
                                             replications=3)

    # Create a stored stream to represent the output video
    output_stream = sp.NamedVideoStream(sc, 'sample-grayscale')
    output = sc.io.Output(grayscale3_frames, [output_stream])

    # Execute the computation graph. PerfParams.manual(50, 250) sets the work
    # packet size (50 frames) and IO packet size (250 frames) explicitly,
    # rather than letting Scanner estimate them.
    sc.run(output, sp.PerfParams.manual(50, 250))

    # Save the resized video as an mp4 file
    output_stream.save_mp4('sample-grayscale')

    input_stream.delete(sc)
    output_stream.delete(sc)
Example #3

import os

import scannerpy as sp
import util  # tutorial-local helper module
def main():
    # Look at resize_op/resize_op.cpp to start this tutorial.

    sc = sp.Client()

    cwd = os.path.dirname(os.path.abspath(__file__))
    if not os.path.isfile(os.path.join(cwd, 'resize_op/build/libresize_op.so')):
        print(
            'You need to build the custom op first: \n'
            '$ pushd {}/resize_op; mkdir build && cd build; cmake ..; make; popd'.
            format(cwd))
        exit()

    # To load a custom op into the Scanner runtime, we use sc.load_op to open
    # the shared library we compiled. If the op takes arguments, load_op also
    # optionally takes a path to the Python file generated for the op's
    # argument protobuf.
    sc.load_op(
        os.path.join(cwd, 'resize_op/build/libresize_op.so'),
        os.path.join(cwd, 'resize_op/build/resize_pb2.py'))

    example_video_path = util.download_video()
    video_stream = sp.NamedVideoStream(sc, 'example', path=example_video_path)
    frames = sc.io.Input([video_stream])

    # Then we use our op just like in the other examples.
    resized_frames = sc.ops.MyResize(frame=frames, width=200, height=300)

    output_stream = sp.NamedVideoStream(sc, 'example_resized')
    output = sc.io.Output(resized_frames, [output_stream])

    sc.run(output, sp.PerfParams.estimate())

    video_stream.delete(sc)
    output_stream.delete(sc)
Example #4

import scannerpy as sp
import util  # tutorial-local helper module
def main():
    sc = sp.Client()

    # Frames on disk can either be stored uncompressed (raw bits) or compressed
    # (encoded using some form of image or video compression). When Scanner
    # reads frames from a table, it automatically decodes the data if necessary.
    # The Op DAG only sees the raw frames. For example, this table is stored
    # as compressed video.
    def make_blurred_frame(streams):
        frames = sc.io.Input(streams)
        blurred_frames = sc.ops.Blur(frame=frames, kernel_size=3, sigma=0.5)
        sampled_frames = sc.streams.Range(blurred_frames, [(0, 30)])
        return frames, sampled_frames

    example_video_path = util.download_video()
    video_stream = sp.NamedVideoStream(sc, 'example', path=example_video_path)

    # By default, if an Op outputs a frame with 3 channels with type uint8,
    # those frames will be compressed using video encoding. No other frame
    # type is currently compressed.
    frame, blurred_frame = make_blurred_frame([video_stream])

    stream = sp.NamedVideoStream(sc, 'output_table_name')
    output = sc.io.Output(blurred_frame, [stream])
    sc.run(output, sp.PerfParams.estimate())

    stream.delete(sc)

    frame, blurred_frame = make_blurred_frame([video_stream])
    # The compression parameters can be controlled by annotating the output
    # of an Op that produces frames.
    low_quality_frame = blurred_frame.compress_video(quality=35)

    low_quality_stream = sp.NamedVideoStream(sc, 'low_quality_video')
    output = sc.io.Output(low_quality_frame, [low_quality_stream])
    sc.run(output, sp.PerfParams.estimate())

    frame, blurred_frame = make_blurred_frame([video_stream])
    # If no compression is desired, this can be specified by indicating that
    # the Op output should be lossless.
    lossless_frame = blurred_frame.lossless()

    lossless_stream = sp.NamedVideoStream(sc, 'lossless_video')
    output = sc.io.Output(lossless_frame, [lossless_stream])

    sc.run(output, sp.PerfParams.estimate())

    # Any sequence of frames saved as a compressed `NamedVideoStream` can be
    # exported as an mp4 file by calling save_mp4 on the stream. This writes
    # a file called 'low_quality_video.mp4' to the current directory.
    low_quality_stream.save_mp4('low_quality_video')

    low_quality_stream.delete(sc)
    lossless_stream.delete(sc)

Example #5

import os
import time

import scannerpy as scan

# `oscmd` is a shell-command helper wrapper and `humandate` a timestamp
# formatter, both defined elsewhere in this project.
def main():

    os.environ['PATH'] = os.environ['PATH'] + ":."

    example_video_path = 'star_wars_heros.mp4'
    ''' Get the media locally '''
    if not os.path.isfile(example_video_path):
        print("File does not exist: %s" % example_video_path)
        retcode = oscmd(
            "wget https://storage.googleapis.com/scanner-data/tutorial_assets/star_wars_heros.mp4"
        )
    partlist = [example_video_path]
    print('Connecting to Scanner database/client...')
    db = scan.Client()
    print(db.summarize())
    db.load_op("/opt/scanner/examples/tutorials/resize_op/libresize_op.so",
               "/opt/scanner/examples/tutorials/resize_op/resize_pb2.py")
    instreamlist = []
    outstreamlist = []
    for fname in partlist:
        if not os.path.isfile(fname):
            print("Input file %s does not exist, skipping" % fname)
            continue

        bname = os.path.basename(fname)[:-4]  # drop the suffix
        instreamlist.append(scan.NamedVideoStream(db, bname, path=fname))
        outfname = 'processed-%s-%s' % (humandate(time.time()), bname)
        outstreamlist.append(scan.NamedVideoStream(db, outfname))

    input_frames = db.io.Input(instreamlist)
    ''' Pipeline '''
    resized_frames = db.ops.MyResize(frame=input_frames, width=640, height=480)
    #     face_frames = db.ops.MTCNNDetectFaces(frame=resized_frames)
    #     boxed_face_frames = db.ops.DrawBboxes(frame=resized_frames, bboxes=face_frames)

    run_frames = resized_frames

    output = db.io.Output(run_frames, outstreamlist)

    db.run(output,
           scan.PerfParams.estimate(),
           cache_mode=scan.CacheMode.Ignore)
    OUTDIR = "."
    for output_stream in outstreamlist:
        if os.path.isdir(OUTDIR):
            sname = os.path.join(OUTDIR, output_stream.name())
            print("Saving %s" % sname)
            output_stream.save_mp4(sname)

    print(db.summarize())

    print('Complete!')
Example #6

File: kube.py  Project: tarsbase/scanner

import scannerpy
    def client(self, retries=3, **kwargs):
        while True:
            try:
                return scannerpy.Client(
                    master=self.master_address(),
                    start_cluster=False,
                    config_path=self._cluster_config.scanner_config,
                    **kwargs)
            except scannerpy.ScannerException:
                if retries == 0:
                    raise
                else:
                    retries -= 1
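
A usage sketch (hypothetical: the class that defines client() is not shown in this snippet, and `cluster` stands in for an instance of it):

sc = cluster.client(retries=5, grpc_timeout=60)  # extra kwargs are forwarded to scannerpy.Client
print(sc.summarize())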
Example #7

import scannerpy as sp
import util  # tutorial-local helper module
def main():
    sc = sp.Client()

    example_video_path = util.download_video()
    video_stream = sp.NamedVideoStream(sc, 'example', path=example_video_path)

    frames = sc.io.Input([video_stream])

    # You can tell Scanner which frames of the video (or which rows of a video
    # table) you want to sample. Here, we indicate that we want to stride
    # the frame column by 4 (i.e., select every 4th frame).
    strided_frames = sc.streams.Stride(frames, [4])

    # We process the sampled frames the same as before.
    hists = sc.ops.Histogram(frame=strided_frames)

    hist_stream = sp.NamedStream(sc, 'example_hist_strided')  # histograms, not frames
    output = sc.io.Output(hists, [hist_stream])

    sc.run(output, sp.PerfParams.estimate())

    # Loop over the stream's rows. Each row here is the histogram computed
    # for one sampled frame: three color channels of 16 bins each.
    video_hists = hist_stream.load()
    num_rows = 0
    for frame_hists in video_hists:
        assert len(frame_hists) == 3
        assert frame_hists[0].shape[0] == 16
        num_rows += 1
    assert num_rows == round(video_stream.len() / 4)

    video_stream.delete(sc)
    hist_stream.delete(sc)

    # Here are some examples of other sampling modes:

    # Range takes a specific subset of a video. Here, it selects all frames
    # from 0 to 100.
    sc.streams.Range(frames, [(0, 100)])

    # Gather takes an arbitrary list of frames from a video.
    sc.streams.Gather(frames, [[10, 17, 32]])
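
    # A minimal sketch of running the Gather variant end to end. Note that it
    # would have to run before video_stream.delete(sc) above, since the ops
    # read from that stream (the output stream name here is illustrative):
    #
    #   gathered = sc.streams.Gather(frames, [[10, 17, 32]])
    #   gathered_hists = sc.ops.Histogram(frame=gathered)
    #   gathered_stream = sp.NamedStream(sc, 'example_hist_gathered')
    #   output = sc.io.Output(gathered_hists, [gathered_stream])
    #   sc.run(output, sp.PerfParams.estimate())
    #   gathered_stream.delete(sc)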
Example #8

import scannerpy as sp
import util  # tutorial-local helper module
from scannerpy.storage import FilesStream  # local-filesystem source/sink
def main():
    sc = sp.Client()

    # What if, instead of a video, you had a list of image files that you
    # wanted to process? Scanner provides an extensible interface for reading and
    # writing data to locations other than the database.

    # For example, let's download a few images now and create a list of their paths:

    util.download_images()
    image_paths = ['sample-frame-1.jpg', 'sample-frame-2.jpg', 'sample-frame-3.jpg']

    # Scanner provides a built-in source to read files from the local filesystem:

    image_stream = FilesStream(image_paths)

    compressed_images = sc.io.Input([image_stream])
    # Unlike the older sc.sources.FrameColumn API, the input paths were bound
    # to this source when we constructed the FilesStream above.

    # Let's write a pipeline that reads our images, resizes them, and writes them
    # back out as files to the filesystem.

    # Since the input images are compressed, we decompress them with the
    # ImageDecoder
    frames = sc.ops.ImageDecoder(img=compressed_images)

    resized_frames = sc.ops.Resize(frame=frames, width=[640], height=[360])

    # Re-encode the images as jpg
    encoded_frames = sc.ops.ImageEncoder(frame=resized_frames, format='jpg')

    # Write the compressed images to files
    resized_paths = ['resized-1.jpg', 'resized-2.jpg', 'resized-3.jpg']
    resized_stream = FilesStream(resized_paths)
    output = sc.io.Output(encoded_frames, [resized_stream])

    sc.run(output, sp.PerfParams.estimate(), cache_mode=sp.CacheMode.Overwrite)

    print('Finished! Wrote the following images: ' + ', '.join(resized_paths))
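
A quick check of the outputs using OpenCV (not part of the original example; the expected shape follows from the Resize op above):

import cv2

for path in ['resized-1.jpg', 'resized-2.jpg', 'resized-3.jpg']:
    img = cv2.imread(path)
    assert img is not None and img.shape[:2] == (360, 640)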
Example #9

import scannerpy as sp
import util  # tutorial-local helper module
def main():
    # Start up the Scanner runtime and set up a connection to it. This loads
    # configuration from the ~/.scanner.toml configuration file.
    sc = sp.Client()

    example_video_path = util.download_video()

    # Scanner processes videos by forming a graph of operations that operate
    # on input streams and produce output streams. For example, here we can
    # construct a `NamedVideoStream` which reads from an example video:
    video_stream1 = sp.NamedVideoStream(sc,
                                        'example1',
                                        path=example_video_path)

    # Now we can start constructing a computation graph. First, we need to declare
    # our input streams that we are going to be reading from. We'll use the
    # `NamedVideoStream` we just created to build an `Input` operation:
    frames = sc.io.Input([video_stream1])

    # The output of the `Input` op is an edge in the computation graph which represents
    # the sequence of values produced by `Input`, which in this case are frames from
    # the video stream we provided.

    # Now we will process the frames from `Input` using a `Histogram` op that computes
    # a color histogram for each frame.
    hists = sc.ops.Histogram(frame=frames)

    # Finally, we define an output stream to write the computed histograms to.
    # To do this, we will create a `NamedStream` (which is just like a `NamedVideoStream`
    # but for non-video data):
    named_stream1 = sp.NamedStream(sc, 'example1_hist')

    # Then, just like we defined an `Input` op to read the input stream, we'll define
    # an `Output` op to write to the output stream we just defined:
    output_op = sc.io.Output(hists, [named_stream1])

    # Now we can execute this computation graph to produce the output stream.
    # You'll see a progress bar while Scanner is computing the outputs.
    # Note that the .run function also takes as input a PerfParams object which contains some
    # parameters that tune the performance of the job, e.g. how many video frames can fit into memory.
    # By default, you can use PerfParams.estimate() which heuristically guesses an appropriate set of
    # parameters (but is not guaranteed to work!). Later tutorials will address how to tune these params.
    job_id = sc.run(output_op, sp.PerfParams.estimate())

    # Scanner also supports operating over batches of streams to allow for more parallelism.
    # For example, let's define a new graph that operates on two copies of our example video:
    named_stream1.delete(sc)  # delete so this stream can be re-populated by the new graph
    video_stream2 = sp.NamedVideoStream(sc,
                                        'example2',
                                        path=example_video_path)
    frames = sc.io.Input([video_stream1, video_stream2])
    hists = sc.ops.Histogram(frame=frames)
    named_stream2 = sp.NamedStream(sc, 'example2_hist')
    output_op = sc.io.Output(hists, [named_stream1, named_stream2])

    job_id = sc.run(output_op, sp.PerfParams.estimate())

    # For each of the streams we provided to the one `Input` op in our graph, Scanner will
    # execute the computation graph on the frames from those streams independently. This
    # mechanism allows you to provide Scanner with potentially thousands of videos you
    # would like to process, up front. If Scanner was executing on a cluster of machines,
    # it would be able to parallelize the processing of those videos across the entire cluster.

    # Now that the graph has been processed, we can load the histograms from our computed stream:
    num_rows = 0
    for hist in named_stream1.load():
        assert len(hist) == 3
        assert hist[0].shape[0] == 16
        num_rows += 1
    assert num_rows == video_stream1.len()

    # Just to cleanup, we'll delete the streams we created:
    streams = [video_stream1, video_stream2, named_stream1, named_stream2]
    streams[0].storage().delete(sc, streams)
Example #10

import os
import time

import scannerpy as scan

# `cmd0`, `cmds`, and `oscmd` are shell-command helper wrappers, and
# `humandate` a timestamp formatter, all defined elsewhere in this project.
def main():

    os.environ['KUBECONFIG'] = cmd0("ls -tr /root/.kube/config")
    os.environ['PATH'] = os.environ['PATH'] + ":."
    os.environ['AWS_ACCESS_KEY_ID'] = cmd0(
        'grep aws_access_key_id /root/.aws/credentials|cut -f2 -d "="')
    os.environ['AWS_SECRET_ACCESS_KEY'] = cmd0(
        'grep aws_secret_access_key /root/.aws/credentials|cut -f2 -d "="')

    example_video_path = '/efsc/star_wars_heros.mp4'
    ''' Get the media locally '''
    if not os.path.isfile(example_video_path):
        print("File does not exist: %s" % example_video_path)
        retcode = oscmd(
            "wget -P /efsc/ https://storage.googleapis.com/scanner-data/tutorial_assets/star_wars_heros.mp4"
        )
    partlist = [example_video_path]
    print('Connecting to Scanner database/client...')
    print('Finding master IP...')
    ip = cmd0(
        "kubectl get services scanner-master --output json | jq -r '.status.loadBalancer.ingress[0].hostname'"
    )
    port = cmd0(
        "kubectl get svc/scanner-master -o json | jq '.spec.ports[0].port' -r")
    master = '{}:{}'.format(ip, port)
    print('Master ip: {:s}'.format(master))

    with open('config.toml', 'w') as f:
        config_text = cmds(
            "kubectl get configmaps scanner-configmap -o json | jq '.data[\"config.toml\"]' -r"
        )
        f.write(config_text)

    db = scan.Client(master=master,
                     start_cluster=False,
                     config_path='./config.toml',
                     grpc_timeout=60)
    print(db.summarize())
    db.load_op("/opt/scanner/examples/tutorials/resize_op/libresize_op.so",
               "/opt/scanner/examples/tutorials/resize_op/resize_pb2.py")
    instreamlist = []
    outstreamlist = []
    for fname in partlist:
        if not os.path.isfile(fname):
            print("Input file %s does not exist, skipping" % fname)
            continue

        bname = os.path.basename(fname)[:-4]  # drop the suffix
        instreamlist.append(scan.NamedVideoStream(db, bname, path=fname))
        outfname = 'processed-%s-%s' % (humandate(time.time()), bname)
        outstreamlist.append(scan.NamedVideoStream(db, outfname))

    input_frames = db.io.Input(instreamlist)
    ''' Pipeline '''
    resized_frames = db.ops.MyResize(frame=input_frames, width=640, height=480)
    #     sampled_frames = db.streams.Stride(resized_frames, [3])
    #     face_frames = db.ops.MTCNNDetectFaces(frame=sampled_frames)
    #     boxed_face_frames = db.ops.DrawBboxes(frame=sampled_frames, bboxes=face_frames)

    run_frames = resized_frames

    output = db.io.Output(run_frames, outstreamlist)

    db.run(output,
           scan.PerfParams.estimate(),
           cache_mode=scan.CacheMode.Ignore)
    OUTDIR = "."
    for output_stream in outstreamlist:
        if os.path.isdir(OUTDIR):
            sname = os.path.join(OUTDIR, output_stream.name())
            print("Saving %s" % sname)
            output_stream.save_mp4(sname)

    print(db.summarize())

    print('Complete!')
Example #11

import numpy as np
import scannerpy as sp
import util  # tutorial-local helper module
def main():
    sc = sp.Client()

    example_video_path = util.download_video()
    video_stream = sp.NamedVideoStream(sc, 'example', path=example_video_path)

    frame = sc.io.Input([video_stream])

    # When working with bounded or unbounded stateful operations, it is sometimes
    # useful to introduce boundaries between sequences of frames which restrict
    # state being shared between them. For example, if you are tracking objects
    # in a movie, you likely do not want the same trackers when the camera changes
    # scenes since the objects you were tracking are no longer there!

    # Scanner provides support for limiting state propagation across frames through
    # "slicing" operations.
    sliced_frame = sc.streams.Slice(frame, partitions=[sc.partitioner.all(50)])
    # Here, we sliced the input frame stream into chunks of 50 elements. What this
    # means is that any ops which process 'sliced_frame' will *only* be able to
    # maintain state within each chunk of 50 elements.

    # For example, let's say we grab the background subtraction op from the previous
    # tutorial (02_op_attributes) and want to run it on our example video:
    @sp.register_python_op(bounded_state=60)
    class BackgroundSubtraction(sp.Kernel):
        def __init__(self, config, alpha, threshold):
            self.config = config
            self.alpha = alpha
            self.thresh = threshold

        def reset(self):
            self.average_image = None

        def execute(self, frame: sp.FrameType) -> sp.FrameType:
            if self.average_image is None:
                self.average_image = frame

            mask = np.abs(frame - self.average_image) < 255 * self.thresh
            mask = np.any(mask, axis=2)

            masked_image = np.copy(frame)
            wmask = np.where(mask)
            masked_image[wmask[0], wmask[1], :] = 0

            self.average_image = (self.average_image * (1.0 - self.alpha) +
                                  frame * self.alpha)

            return masked_image

    frame = sc.io.Input([video_stream])

    # Imagine that there are scene changes at frames 1100, 1200, and 1400. To
    # tell Scanner that we do not want background subtraction to cross these
    # boundaries, we can create a 'partitioner' which splits the input.
    scene_partitions = sc.partitioner.ranges([(1100, 1200), (1200, 1400)])

    # Now we slice the input frame sequence into these two partitions using a
    # slice operation
    sliced_frame = sc.streams.Slice(frame, partitions=[scene_partitions])

    # Then we perform background subtraction and indicate we need 60 prior
    # frames to produce correct output
    masked_frame = sc.ops.BackgroundSubtraction(frame=sliced_frame,
                                                alpha=0.02,
                                                threshold=0.05,
                                                bounded_state=60)
    # Since the background subtraction operation is done, we can unslice the
    # sequence to join it back into a single contiguous stream. You must
    # unslice sequences before feeding them back into sinks.
    unsliced_frame = sc.streams.Unslice(masked_frame)

    stream = sp.NamedVideoStream(sc, '04_masked_video')
    output = sc.io.Output(unsliced_frame, [stream])

    sc.run(output, sp.PerfParams.estimate())

    stream.save_mp4('04_masked')
    stream.delete(sc)

    videos = []
    videos.append('04_masked.mp4')

    print('Finished! The following videos were written: {:s}'.format(
        ', '.join(videos)))
Example #12

File: main.py  Project: tarsbase/scanner

import os
import sys

import scannerpy as sp
from scannerpy.storage import PythonStream  # wraps in-memory Python data as a stream
from tqdm import tqdm

import kernels  # app-local module providing nms_bulk and smooth_box
# MODEL_TEMPLATE_URL is a constant defined at module scope in the original file.
def main():
    if len(sys.argv) <= 1:
        print('Usage: {:s} path/to/your/video/file.mp4'.format(sys.argv[0]))
        sys.exit(1)

    movie_path = sys.argv[1]
    print('Detecting objects in movie {}'.format(movie_path))
    movie_name = os.path.splitext(os.path.basename(movie_path))[0]

    sc = sp.Client()

    stride = 1
    input_stream = sp.NamedVideoStream(sc, movie_name, path=movie_path)
    frame = sc.io.Input([input_stream])
    strided_frame = sc.streams.Stride(frame, [stride])

    model_name = 'ssd_mobilenet_v1_coco_2017_11_17'
    model_url = MODEL_TEMPLATE_URL.format(model_name)
    objdet_frame = sc.ops.ObjDetect(
        frame=strided_frame,
        dnn_url=model_url,
        device=sp.DeviceType.GPU if sc.has_gpu() else sp.DeviceType.CPU,
        batch=2)

    detect_stream = sp.NamedVideoStream(sc, movie_name + '_detect')
    output_op = sc.io.Output(objdet_frame, [detect_stream])
    sc.run(output_op,
           sp.PerfParams.estimate(),
           cache_mode=sp.CacheMode.Overwrite)

    print('Extracting data from Scanner output...')
    # bundled_data_list is a list of bundled_data
    # bundled data format: [box position(x1 y1 x2 y2), box class, box score]
    bundled_data_list = list(tqdm(detect_stream.load()))
    print('Successfully extracted data from Scanner output!')

    # run non-maximum suppression
    bundled_np_list = kernels.nms_bulk(bundled_data_list)
    bundled_np_list = kernels.smooth_box(bundled_np_list, min_score_thresh=0.5)

    print('Writing frames to {:s}_obj_detect.mp4'.format(movie_name))

    frame = sc.io.Input([input_stream])
    bundled_data = sc.io.Input([PythonStream(bundled_np_list)])
    strided_frame = sc.streams.Stride(frame, [stride])
    drawn_frame = sc.ops.TFDrawBoxes(frame=strided_frame,
                                     bundled_data=bundled_data,
                                     min_score_thresh=0.5)
    drawn_stream = sp.NamedVideoStream(sc, movie_name + '_drawn_frames')
    output_op = sc.io.Output(drawn_frame, [drawn_stream])
    sc.run(output_op,
           sp.PerfParams.estimate(),
           cache_mode=sp.CacheMode.Overwrite)

    drawn_stream.save_mp4(movie_name + '_obj_detect')

    input_stream.delete(sc)
    detect_stream.delete(sc)
    drawn_stream.delete(sc)

    print('Successfully generated {:s}_obj_detect.mp4'.format(movie_name))
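
The app-local kernels module is not shown in this snippet. As a rough illustration of what its non-maximum suppression step does, a generic greedy IoU-based NMS over [x1, y1, x2, y2] boxes looks like this (a sketch, not the project's actual implementation):

import numpy as np

def nms(boxes, scores, iou_thresh=0.5):
    # Greedily keep the highest-scoring box, drop all remaining boxes that
    # overlap it by more than iou_thresh, and repeat on what is left.
    order = scores.argsort()[::-1]
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        rest = order[1:]
        xx1 = np.maximum(boxes[i, 0], boxes[rest, 0])
        yy1 = np.maximum(boxes[i, 1], boxes[rest, 1])
        xx2 = np.minimum(boxes[i, 2], boxes[rest, 2])
        yy2 = np.minimum(boxes[i, 3], boxes[rest, 3])
        inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
        area_i = (boxes[i, 2] - boxes[i, 0]) * (boxes[i, 3] - boxes[i, 1])
        area_rest = ((boxes[rest, 2] - boxes[rest, 0]) *
                     (boxes[rest, 3] - boxes[rest, 1]))
        iou = inter / (area_i + area_rest - inter)
        order = rest[iou <= iou_thresh]
    return keep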
Example #13

from typing import Sequence

import cv2
import numpy as np
import scannerpy as sp
import util  # tutorial-local helper module
def main():
    videos = []
    # Keep track of the streams so we can delete them at the end
    streams = []

    sc = sp.Client()

    example_video_path = util.download_video()
    video_stream = sp.NamedVideoStream(sc, 'example', path=example_video_path)
    streams.append(video_stream)
    # Many ops simply involve applying some processing to their inputs and then
    # returning their outputs. But there are also many operations in video
    # processing that require the ability to see adjacent frames (such as for
    # computing optical flow), need to keep state over time (such as for tracking
    # objects), or need to process multiple elements for efficiency reasons (such as
    # batching for DNNs).

    # Scanner ops therefore have several optional attributes that enable them to
    # support these forms of operations:

    # 1. Device Type:
    #   Ops can specify that they require CPUs or GPUs by declaring their device
    #   type. By default, the device_type is DeviceType.CPU.

    @sp.register_python_op(device_type=sp.DeviceType.CPU)
    def device_resize(config, frame: sp.FrameType) -> sp.FrameType:
        return cv2.resize(frame, (config.args['width'], config.args['height']))

    frames = sc.io.Input([video_stream])

    resized_frames = sc.ops.device_resize(frame=frames, width=640, height=480)

    stream = sp.NamedVideoStream(sc, 'example_resize')
    streams.append(stream)
    output = sc.io.Output(resized_frames, [stream])

    sc.run(output, sp.PerfParams.estimate())

    stream.save_mp4('02_device_resize')
    videos.append('02_device_resize.mp4')

    # 2. Batch:
    #   The Op can receive multiple elements at once to enable SIMD or
    #   vector-style processing.

    @sp.register_python_op(batch=10)
    def batch_resize(config,
                     frame: Sequence[sp.FrameType]) -> Sequence[sp.FrameType]:
        output_frames = []
        for fr in frame:
            output_frames.append(
                cv2.resize(fr, (config.args['width'], config.args['height'])))
        return output_frames

    # Here we specify that the resize op should receive a batch of 10
    # input elements at once. Logically, each element is still processed
    # independently, but multiple elements are provided to enable efficient
    # batch processing. If there are not enough elements left in a stream,
    # the Op may receive fewer than a full batch of elements.

    frame = sc.io.Input([video_stream])

    resized_frame = sc.ops.batch_resize(frame=frame,
                                        width=640,
                                        height=480,
                                        batch=10)

    stream = sp.NamedVideoStream(sc, 'example_batch_resize')
    streams.append(stream)
    output = sc.io.Output(resized_frame, [stream])

    sc.run(output, sp.PerfParams.estimate())

    stream.save_mp4('02_batch_resize')
    videos.append('02_batch_resize.mp4')

    # 3. Stencil:
    #   The Op requires a window of input elements (for example, the
    #   previous and next element) at the same time to produce an
    #   output.

    # Here, we use the stencil attribute to write an optical flow op which
    # computes flow between the current and next frame.
    @sp.register_python_op(stencil=[0, 1])
    def optical_flow(config, frame: Sequence[sp.FrameType]) -> sp.FrameType:
        gray1 = cv2.cvtColor(frame[0], cv2.COLOR_BGR2GRAY)
        gray2 = cv2.cvtColor(frame[1], cv2.COLOR_BGR2GRAY)
        flow = cv2.calcOpticalFlowFarneback(gray1, gray2, None, 0.5, 3, 15, 3,
                                            5, 1.2, 0)
        return flow

    # This op visualizes the flow field by converting it into an RGB image
    @sp.register_python_op()
    def visualize_flow(config, flow: sp.FrameType) -> sp.FrameType:
        hsv = np.zeros(shape=(flow.shape[0], flow.shape[1], 3), dtype=np.uint8)
        hsv[..., 1] = 255

        mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
        hsv[..., 0] = ang * 180 / np.pi / 2
        hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
        rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        return rgb

    frames = sc.io.Input([video_stream])

    # This next line is using a feature we'll discuss in the next tutorial, but you
    # can think of it as selecting a subset of elements from the stream (here,
    # frames 0 to 30)
    range_frames = sc.streams.Range(frames, [(0, 30)])
    flows = sc.ops.optical_flow(frame=range_frames, stencil=[0, 1])
    flow_viz_frames = sc.ops.visualize_flow(flow=flows)

    stream = sp.NamedVideoStream(sc, 'example_flow')
    streams.append(stream)
    output = sc.io.Output(flow_viz_frames, [stream])

    sc.run(output, sp.PerfParams.estimate())

    stream.save_mp4('02_flow')
    videos.append('02_flow.mp4')

    # 4. Bounded State:
    #   For each output, the Op requires at least W sequential
    #   "warmup" elements before it can produce a valid output.
    #   For example, if the output of this Op is sampled
    #   sparsely, this guarantees that the Op can "warmup"
    #   its state on a stream of W elements before producing the
    #   requested output.

    @sp.register_python_op(bounded_state=60)
    class BackgroundSubtraction(sp.Kernel):
        def __init__(self, config, alpha, threshold):
            self.config = config
            self.alpha = alpha
            self.thresh = threshold

        # Reset is called when the kernel switches to a new part of the stream
        # and so shouldn't maintain its previous state.
        def reset(self):
            self.average_image = None

        def execute(self, frame: sp.FrameType) -> sp.FrameType:
            if self.average_image is None:
                self.average_image = frame

            mask = np.abs(frame - self.average_image) < 255 * self.thresh
            mask = np.any(mask, axis=2)

            masked_image = np.copy(frame)
            wmask = np.where(mask)
            masked_image[wmask[0], wmask[1], :] = 0

            self.average_image = (self.average_image * (1.0 - self.alpha) +
                                  frame * self.alpha)

            return masked_image

    # Here we wrote an op that performs background subtraction by keeping a
    # running average image over the past frames. We set `bounded_state=60`
    # to indicate that this kernel needs at least 60 frames before the output
    # should be considered reasonable.

    frames = sc.io.Input([video_stream])

    # We perform background subtraction and indicate we need 60 prior
    # frames to produce correct output
    masked_frames = sc.ops.BackgroundSubtraction(frame=frames,
                                                 alpha=0.05,
                                                 threshold=0.05,
                                                 bounded_state=60)
    # Here, we say that we only want the outputs for this range of frames
    sampled_frames = sc.streams.Range(masked_frames, [(0, 120)])

    stream = sp.NamedVideoStream(sc, 'masked_video')
    streams.append(stream)
    output = sc.io.Output(sampled_frames, [stream])

    sc.run(output, sp.PerfParams.estimate())

    stream.save_mp4('02_masked')
    videos.append('02_masked.mp4')

    # 5. Unbounded State:
    #     This Op will always process all preceding elements of
    #     its input streams before producing a requested output.
    #     This means that sampling operations after this Op
    #     can not change how many inputs it receives. In the next
    #     tutorial, we will show how this can be relaxed for
    #     sub-streams of the input.
    @sp.register_python_op(unbounded_state=True)
    class Example(sp.Kernel):
        def __init__(self, config):
            pass

        def reset(self):
            pass

        def execute(self, frame: sp.FrameType) -> bytes:
            pass

    for stream in streams:
        stream.delete(sc)

    print('Finished! The following videos were written: {:s}'.format(
        ', '.join(videos)))