Example #1
    def test_display_camera(self):
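        # Show webcam 0 in grayscale by averaging the color channels in a callback.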
        from displayarray import display
        import numpy as np

        def black_and_white(arr):
            return (np.sum(arr, axis=-1) / 3).astype(np.uint8)

        display(0, callbacks=black_and_white, blocking=True)
Example #2
    def test_nested_frames(self):
        from displayarray import display
        import numpy as np

        def nest_frame(frame):
            frame = np.asarray([[[[[[frame]]]]], [[[[[frame]]], [[[frame]]]]]])
            return frame

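        # The nested array holds three copies of the frame, one for each named window.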
        display(0,
                callbacks=nest_frame,
                window_names=["1", "2", "3"],
                blocking=True)
Example #3
    def test_display_numpy_callback(self):
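        # Display a random array and drift it by adding small noise in a callback each frame.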
        from displayarray import display
        import numpy as np

        arr = np.random.normal(0.5, 0.1, (500, 500, 3))

        def fix_arr_cv(arr_in):
            arr_in[:] += np.random.normal(0.01, 0.005, (500, 500, 3))
            arr_in %= 1.0

        display(arr, callbacks=fix_arr_cv, blocking=True)
Example #4
def test_backpropogate_diffs_cpu():
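    # CPU backward pass of a dense 2D convolution: feed each frame and a random
    # output error through dense_conv_backward_2d_fast, then display the error
    # and the resulting input gradient.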
    d = display(test_image_smol_smol, size=(1, 1))

    edge_kernel = np.random.randint(0, 127, midget_rgc(2).shape).astype(dtype=np.float32) / 127.0

    kernel_stride_x = kernel_stride_y = 1

    kernel_padding_x = kernel_padding_y = 1
    kernel_dilation_x = kernel_dilation_y = 0

    while d:
        if d.frames:
            frame = next(iter(d.frames.values()))[0].astype(np.float32)
            frame = frame[np.newaxis, ...]
            frame = np.swapaxes(frame, 1, 3)
            d_inp_image = np.zeros_like(frame)
            out_error = np.random.randint(0, 127, frame.shape).astype(dtype=np.float32) / 127.0
            d_kernel = np.zeros_like(edge_kernel)

            dense_conv_backward_2d_fast(frame,
                                        d_inp_image,
                                        edge_kernel,
                                        d_kernel,
                                        out_error,
                                        (kernel_stride_x, kernel_stride_y),
                                        (kernel_padding_x, kernel_padding_y),
                                        )
            out_error = np.swapaxes(out_error, 1, 3)
            out_error = out_error[0, ...]
            d.update(out_error, 'out err')

            d_inp_image = np.swapaxes(d_inp_image, 1, 3)
            d_inp_image = d_inp_image[0, ...]
            d.update(d_inp_image / d_inp_image.max(), 'in err')
            print(d_kernel)
Example #5
def test_denoise_cpu():
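    # Toy CPU denoising loop: convolve a noised 8x8 frame with a random kernel,
    # take the difference from the clean frame as the error, backpropagate it,
    # and nudge the kernel by a scaled gradient step.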
    d = display(0, size=(1, 1))

    edge_kernel = np.random.randint(0, 127, midget_rgc(2).shape).astype(dtype=np.float32) / 127.0

    kernel_stride_x = kernel_stride_y = 1

    kernel_padding_x = kernel_padding_y = 1
    kernel_dilation_x = kernel_dilation_y = 0

    while d:
        if d.frames:
            frame = next(iter(d.frames.values()))[0].astype(np.float32) / 256.0
            frame = cv2.resize(frame, dsize=(8, 8), interpolation=cv2.INTER_NEAREST)
            frame = frame[np.newaxis, ...]
            frame = np.swapaxes(frame, 1, 3)
            output = np.zeros_like(frame)
            noise = np.random.randint(0, 127, frame.shape).astype(dtype=np.float32) / 256.0
            noised_frame = frame + noise
            dense_conv_forward_2d_fast(noised_frame,
                                       edge_kernel,
                                       output,
                                       (kernel_stride_x, kernel_stride_y),
                                       (kernel_padding_x, kernel_padding_y))
            out_error = output - frame
            d_inp_image = np.zeros_like(frame)
            d_kernel = np.zeros_like(edge_kernel)
            dense_conv_backward_2d_fast(frame,
                                        d_inp_image,
                                        edge_kernel,
                                        d_kernel,
                                        out_error,
                                        (kernel_stride_x, kernel_stride_y),
                                        (kernel_padding_x, kernel_padding_y),
                                        )
            frame = np.swapaxes(frame, 1, 3)
            frame = frame[0, ...]
            output = np.swapaxes(output, 1, 3)
            output = output[0, ...]
            out_error = np.swapaxes(out_error, 1, 3)
            out_error = out_error[0, ...]
            d_inp_image = np.swapaxes(d_inp_image, 1, 3)
            d_inp_image = d_inp_image[0, ...]
            noised_frame = np.swapaxes(noised_frame, 1, 3)
            noised_frame = noised_frame[0, ...]
            d.update(frame, 'mini frame')
            d.update(noised_frame, 'noised frame')
            d.update(output, 'out img')
            d.update(out_error, 'out err')
            d.update(d_inp_image / d_inp_image.max(), 'in err')
            print(d_kernel)
            d_half = (np.average(np.abs(d_kernel.copy())))/1e6
            edge_kernel -= d_kernel * d_half
Example #6
def main(argv=None):
    """Process command line arguments."""
    arguments = docopt(__doc__, argv=argv)
    if arguments["--version"]:
        from displayarray import __version__

        print(f"DisplayArray V{__version__}")
        return
    from displayarray import display

    vids = [int(w) for w in arguments["--webcam"]] + arguments["--video"]
    v_disps = None
    if vids:
        v_disps = display(*vids, blocking=False)
    from displayarray.frame.frame_updater import read_updates_ros, read_updates_zero_mq

    topics = arguments["--topic"]
    topics_split = [t.split(",") for t in topics]
    d = display()

    async def msg_recv():
        nonlocal d
        while d:
            if arguments["--message-backend"] == "ROS":
                async for v_name, frame in read_updates_ros(
                    [t for t, d in topics_split],
                    [d for t, d in topics_split]):
                    d.update(arr=frame, id=v_name)
            if arguments["--message-backend"] == "ZeroMQ":
                async for v_name, frame in read_updates_zero_mq(
                        *[bytes(t, encoding="ascii") for t in topics]):
                    d.update(arr=frame, id=v_name)

    async def update_vids():
        while v_disps:
            if v_disps:
                v_disps.update()
                await asyncio.sleep(0)

    async def runner():
        await asyncio.wait([msg_recv(), update_vids()])

    loop = asyncio.get_event_loop()
    loop.run_until_complete(runner())
    loop.close()
Example #7
    def test_display_numpy_loop(self):
        from displayarray import display
        import numpy as np

        arr = np.random.normal(0.5, 0.1, (100, 100, 3))

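        # The displayed window tracks the array as the loop keeps mutating it.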
        with display(arr) as displayer:
            while displayer:
                arr[:] += np.random.normal(0.001, 0.0005, (100, 100, 3))
                arr %= 1.0
Example #8
    def test_display_video(self):
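        # Play a video while a callback slowly shifts each color channel with sine/cosine waves.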
        from displayarray import display
        import math as m

        def forest_color(arr):
            forest_color.i += 1
            arr[...,
                0] = (m.sin(forest_color.i *
                            (2 * m.pi) * .4 / 360) * 255 + arr[..., 0]) % 255
            arr[..., 1] = (m.sin(
                (forest_color.i *
                 (2 * m.pi) * .5 + 45) / 360) * 255 + arr[..., 1]) % 255
            arr[...,
                2] = (m.cos(forest_color.i *
                            (2 * m.pi) * .3 / 360) * 255 + arr[..., 2]) % 255

        forest_color.i = 0

        display("fractal test.mp4",
                callbacks=forest_color,
                blocking=True,
                fps_limit=120)
Example #9
    def _init_cam(self, cam):
        """Initialize the input camera."""
        self._pre_crop_callback = crop.Crop(
            output_size=self.crop_settings.PRE_LENS).enable_mouse_control()
        self._lens_callback = lens.BarrelPyTorch()
        self._post_crop_callback = crop.Crop(
            output_size=self.crop_settings.POST_LENS).enable_mouse_control()

        if not isinstance(cam, Iterable):
            cam = [cam]

        self.cam = (display(
            *cam, size=self.crop_settings.CAM_SIZE_REQUEST).add_callback(
                self._pre_crop_callback).add_callback(
                    self._lens_callback).add_callback(
                        self._post_crop_callback).wait_for_init())
Example #10
    def test_display_tensorflow(self):
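        # Train a small convolutional autoencoder to denoise video frames,
        # displaying the noised input and the model output each step.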
        from displayarray import display
        import numpy as np
        from tensorflow.keras import layers, models
        import tensorflow as tf

        for gpu in tf.config.experimental.list_physical_devices("GPU"):
            tf.compat.v2.config.experimental.set_memory_growth(gpu, True)

        displayer = display("fractal test.mp4")
        displayer.wait_for_init()
        autoencoder = models.Sequential()
        autoencoder.add(
            layers.Conv2D(20, (3, 3),
                          activation="sigmoid",
                          input_shape=displayer.frames[0].shape))
        autoencoder.add(
            layers.Conv2D(20, (3, 3),
                          activation="sigmoid",
                          input_shape=displayer.frames[0].shape))
        autoencoder.add(layers.Conv2DTranspose(3, (3, 3),
                                               activation="sigmoid"))
        autoencoder.add(layers.Conv2DTranspose(3, (3, 3),
                                               activation="sigmoid"))

        autoencoder.compile(loss="mse", optimizer="adam")

        while displayer:
            grab = tf.convert_to_tensor(
                displayer.FRAME_DICT["fractal test.mp4frame"][np.newaxis,
                                                              ...].astype(
                                                                  np.float32) /
                255.0)
            grab_noise = tf.convert_to_tensor(
                (((displayer.FRAME_DICT["fractal test.mp4frame"][
                    np.newaxis, ...].astype(np.float32) +
                   np.random.uniform(0, 255, grab.shape)) / 2) % 255) / 255.0)
            displayer.update((grab_noise.numpy()[0] * 255.0).astype(np.uint8),
                             "uid for grab noise")
            autoencoder.fit(grab_noise, grab, steps_per_epoch=1, epochs=1)
            output_image = autoencoder.predict(grab, steps=1)
            displayer.update((output_image[0] * 255.0).astype(np.uint8),
                             "uid for autoencoder output")
Example #11
def test_forward_rgc_cpu():
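    # Forward dense 2D convolution on the CPU with the midget_rgc(2) kernel,
    # displaying the scaled result.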
    d = display(test_image_smol, size=(1, 1))

    edge_kernel = midget_rgc(2)

    kernel_stride_x = kernel_stride_y = 1

    kernel_padding_x = kernel_padding_y = 1
    kernel_dilation_x = kernel_dilation_y = 0

    while d:
        if d.frames:
            frame = next(iter(d.frames.values()))

            in_frame = frame[0].astype(dtype=np.float32)

            edge_out = dense_conv_forward_2d(in_frame, edge_kernel,
                                             (kernel_stride_x, kernel_stride_y), (kernel_padding_x, kernel_padding_y))

            d.update(edge_out / 256.0, 'blur')
Example #12
def profile_reading(total_seconds=5):
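    # Measure the average webcam frame rate by timing successive updates from
    # display(0) over total_seconds, then print it in fps.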
    t_init = t01 = time.time()
    times = []
    started = False
    for up in display(0, size=(1, 1)):
        if up:
            t1 = time.time()
            if started:
                times.append((t1 - t01) * 1000)
            t01 = t1
            started = True
        if started:
            t2 = time.time()
            if t2 - t_init >= total_seconds:
                if times:
                    print(
                        f"Average framerate: {1000 / (sum(times) / len(times))}fps"
                    )
                else:
                    print("failure")
                break
        else:
            t_init = time.time()
Example #13
    def __iter__(self):
        """
        Run the virtual eye in a for loop frame by frame.

        returns the encoding and loss in a tuple by default.
        """
        prev = ScreenWatcher.State()
        with display() as d:
            while d:
                if self.pred_img is not None:
                    d.update_specific(
                        pytorch_image_to_cv(self.pred_img) / 256.0, "pred"
                    )
                d.update_specific(
                    self.get_screen(self.x, self.y, self.w, self.h), "grab"
                )
                d._display_frames(d.frames)
                if len(d.frames) > 0:
                    frame = d.frames[0]

                    prev = self._update_prev(frame, prev)

                    loss = self.train_recognizer(frame, prev.frame, prev.movement)

                    yield tuple(self._handle_iter_yields(loss))
                    self.bad_actions = 0  # reset now that we've yielded

                    prev.frame = frame.copy()

                    self._update_focal_point()
                    self._encode_focal_point_movement(prev)

                    self.recognition_system.model.set_movement_data_len(
                        prev.movement.size
                    )

        self.save()
Example #14
def test_forward_edge_detect_glsl():
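    # GPU forward convolution: compile the dense_conv_forward_2d.glsl compute
    # shader once, then each frame upload the image and kernel to storage
    # buffers, run the shader, and display the normalized output.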
    d = display(test_image_1, size=(1, 1))

    first_frame = True

    edge_kernel = midget_rgc(2)

    kernel_height = kernel_width = 3
    kernel_stride_x = kernel_stride_y = 1

    kernel_padding_x = kernel_padding_y = 1
    kernel_dilation_x = kernel_dilation_y = 0

    width = height = None

    test_computer = None

    buffi = buffo = None

    while d:
        if d.frames:
            frame = next(iter(d.frames.values()))
            if first_frame:
                height = frame[0].shape[0]
                width = frame[0].shape[1]
                in_channels = frame[0].shape[2]

                shader_file = open(
                    os.path.abspath(os.sep.join(['..', 'gltensors', 'shaders', 'dense_conv_forward_2d.glsl'])))
                simplex_shader = shader_file.read()
                shader_file.close()

                simplex_shader = glsl_import_filter(simplex_shader,
                                                    os.path.abspath(os.sep.join(['..', 'gltensors', 'shaders'])))

                test_computer = glcpu.GLSLComputer(simplex_shader,
                                                   width=width,
                                                   height=height,
                                                   in_channels=in_channels,
                                                   out_channels=in_channels,
                                                   kernel_width=kernel_width,
                                                   kernel_height=kernel_height,
                                                   kernel_stride_x=kernel_stride_x,
                                                   kernel_stride_y=kernel_stride_y,
                                                   kernel_padding_x=kernel_padding_x,
                                                   kernel_padding_y=kernel_padding_y,
                                                   kernel_dilation_x=kernel_dilation_x,
                                                   kernel_dilation_y=kernel_dilation_y
                                                   )

                first_frame = False

                buffk = test_computer.ctx.buffer(data=edge_kernel.astype(dtype=np.float32).tobytes())
                buffk.bind_to_storage_buffer(1)

            buffi = test_computer.ctx.buffer(data=frame[0].astype(dtype=np.float32).tobytes(), dynamic=True)
            buffi.bind_to_storage_buffer(0)

            buffo = test_computer.ctx.buffer(
                data=np.zeros_like(frame[0]).astype(dtype=np.float32).tobytes(), dynamic=True)
            buffo.bind_to_storage_buffer(2)

            # buffi.write(d.frames['0'][0].astype(dtype=np.float32).tobytes())
            # buffo.clear()

            test_computer.cpu.run(height, width)

            edge_out = np.frombuffer(buffo.read(), dtype=np.float32).reshape((height, width, in_channels))

            d.update(edge_out / edge_out.max(), 'blur')

            buffi.release()
            buffo.release()
Example #15
from displayarray import display
import numpy as np

display(np.random.normal(0.5, 0.1, (500, 500, 3))).block()
Example #16
from displayarray.effects import crop, lens
from displayarray import display
from examples.videos import test_video

# Move the mouse to move where the crop is from on the original image

display(test_video).add_callback(crop.Crop()).add_callback(
    lens.Barrel().enable_mouse_control()).block()
Example #17
def test_backpropogate_diffs():
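    # GPU backward convolution: compile dense_conv_2d_backward.glsl once, then
    # each frame feed random input and output-error buffers through it and read
    # back the input and kernel gradients.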
    d = display(test_image_1, size=(1, 1))

    first_frame = True

    edge_kernel = np.zeros_like(midget_rgc(2))

    kernel_height = kernel_width = 3
    kernel_stride_x = kernel_stride_y = 1

    kernel_padding_x = kernel_padding_y = 1
    kernel_dilation_x = kernel_dilation_y = 0

    width = height = None

    test_computer = None

    buffkd = buffo = None

    while d:
        if d.frames:
            if first_frame:
                height = d.frames['0'][0].shape[0]
                width = d.frames['0'][0].shape[1]
                in_channels = d.frames['0'][0].shape[2]

                shader_file = open(
                    os.path.abspath(os.sep.join(['..', 'gltensors', 'shaders', 'dense_conv_2d_backward.glsl'])))
                simplex_shader = shader_file.read()
                shader_file.close()

                simplex_shader = glsl_import_filter(simplex_shader,
                                                    os.path.abspath(os.sep.join(['..', 'gltensors', 'shaders'])))

                test_computer = glcpu.GLSLComputer(simplex_shader,
                                                   width=width,
                                                   height=height,
                                                   in_channels=in_channels,
                                                   out_channels=in_channels,
                                                   kernel_width=kernel_width,
                                                   kernel_height=kernel_height,
                                                   kernel_stride_x=kernel_stride_x,
                                                   kernel_stride_y=kernel_stride_y,
                                                   kernel_padding_x=kernel_padding_x,
                                                   kernel_padding_y=kernel_padding_y,
                                                   kernel_dilation_x=kernel_dilation_x,
                                                   kernel_dilation_y=kernel_dilation_y
                                                   )

                first_frame = False

                buffk = test_computer.ctx.buffer(data=edge_kernel.astype(dtype=np.float32).tobytes())
                buffk.bind_to_storage_buffer(4)

                buffkd = test_computer.ctx.buffer(data=edge_kernel.astype(dtype=np.float32).tobytes())
                buffkd.bind_to_storage_buffer(1)

            buffi = test_computer.ctx.buffer(
                np.random.randint(0, 127, d.frames['0'][0].shape).astype(dtype=np.float32).tobytes(), dynamic=True)
            buffi.bind_to_storage_buffer(3)

            buffid = test_computer.ctx.buffer(data=np.zeros_like(d.frames['0'][0]).astype(dtype=np.float32).tobytes(),
                                              dynamic=True)
            buffid.bind_to_storage_buffer(0)

            buffod = test_computer.ctx.buffer(
                data=np.random.randint(0, 127, d.frames['0'][0].shape).astype(dtype=np.float32).tobytes(), dynamic=True)
            buffod.bind_to_storage_buffer(2)

            test_computer.cpu.run(height, width)

            ind_out = np.frombuffer(buffid.read(), dtype=np.float32).reshape((height, width, in_channels))
            kd_out = np.frombuffer(buffkd.read(), dtype=np.float32).reshape(edge_kernel.shape)

            buffi.release()
            buffid.release()
            buffod.release()
Example #18
from displayarray.effects import crop
from displayarray import display
import numpy as np

# Scroll the mouse wheel and press ctrl, alt, or shift to select which channels are displayed as red, green, or blue.
arr = np.ones((250, 250, 250))
for x in range(250):
    arr[..., x] = x / 250.0
display(arr).block()
Example #19
from displayarray import display
import numpy as np

arr = np.random.normal(0.5, 0.1, (500, 500, 3))


def fix_arr_cv(arr_in):
    arr_in[:] += np.random.normal(0.01, 0.005, (500, 500, 3))
    arr_in %= 1.0


display(arr, callbacks=fix_arr_cv, blocking=True)
Example #20
from displayarray import display
import numpy as np
from tensorflow.keras import layers, models
import tensorflow as tf
from examples.videos import test_video_2

for gpu in tf.config.experimental.list_physical_devices("GPU"):
    tf.compat.v2.config.experimental.set_memory_growth(gpu, True)

displayer = display(test_video_2)
displayer.wait_for_init()
autoencoder = models.Sequential()
autoencoder.add(
    layers.Conv2D(20, (3, 3),
                  activation="sigmoid",
                  input_shape=displayer.frames[0].shape))
autoencoder.add(
    layers.Conv2D(20, (3, 3),
                  activation="sigmoid",
                  input_shape=displayer.frames[0].shape))
autoencoder.add(layers.Conv2DTranspose(3, (3, 3), activation="sigmoid"))
autoencoder.add(layers.Conv2DTranspose(3, (3, 3), activation="sigmoid"))

autoencoder.compile(loss="mse", optimizer="adam")

while displayer:
    displayer.update()
    grab = tf.convert_to_tensor(
        next(iter(displayer.FRAME_DICT.values()))[np.newaxis, ...].astype(
            np.float32) / 255.0)
    grab_noise = tf.convert_to_tensor(
        (((next(iter(displayer.FRAME_DICT.values()))[np.newaxis, ...].astype(
            np.float32) + np.random.uniform(0, 255, grab.shape)) / 2) % 255) /
        255.0)
    displayer.update((grab_noise.numpy()[0] * 255.0).astype(np.uint8),
                     "uid for grab noise")
    autoencoder.fit(grab_noise, grab, steps_per_epoch=1, epochs=1)
    output_image = autoencoder.predict(grab, steps=1)
    displayer.update((output_image[0] * 255.0).astype(np.uint8),
                     "uid for autoencoder output")
Example #21
    def test_display_numpy(self):
        from displayarray import display
        import numpy as np

        display(np.random.normal(0.5, 0.1, (500, 500, 3)))
Example #22
    def test_multi_cams_multi_source(self):
        from displayarray import display
        import numpy as np
        display(0, np.random.uniform(0.0, 1.0, (500, 500)), blocking=True)
Example #23
    def test_multi_cams_one_source(self):
        from displayarray import display
        display(0, window_names=["cammy", "cammy2"], blocking=True)
Example #24
    def test_display(self):
        from displayarray import display
        import numpy as np
        display(np.ones((100, 100)), np.zeros((100, 100)), blocking=True)
Example #25
    def test_mouse_loop(self):
        from displayarray import display
        from displayarray.input import mouse_loop

        @mouse_loop
        def print_mouse_thread(mouse_event):
            print(mouse_event)

        display("fractal test.mp4", blocking=True)
Example #26
from displayarray.effects import crop, lens
from displayarray import display
from examples.videos import test_video

# Move the mouse to center the image, scroll to increase/decrease barrel, ctrl+scroll to increase/decrease zoom

d = (display(test_video).add_callback(
    lens.BarrelPyTorch().enable_mouse_control(
        crop_size=(256, 256))).add_callback(
            crop.Crop(output_size=(256, 256, 3))).wait_for_init())

while d:
    if len(d.frames) > 0:
        pass
Example #27
from displayarray.effects import lens
from displayarray import display
from examples.videos import test_video

m = lens.Mustache()
m.enable_mouse_control()
display(test_video, callbacks=m, blocking=True)
Example #28
from displayarray import display
import math as m
from examples.videos import test_video


def forest_color(arr):
    forest_color.i += 1
    arr[..., 0] = (m.sin(forest_color.i *
                         (2 * m.pi) * 0.4 / 360) * 255 + arr[..., 0]) % 255
    arr[..., 1] = (m.sin(
        (forest_color.i *
         (2 * m.pi) * 0.5 + 45) / 360) * 255 + arr[..., 1]) % 255
    arr[..., 2] = (m.cos(forest_color.i *
                         (2 * m.pi) * 0.3 / 360) * 255 + arr[..., 2]) % 255


forest_color.i = 0

display(test_video, callbacks=forest_color, blocking=True, fps_limit=120)
Example #29
def test_denoise_cpu():
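    # Denoising loop using the GLSL compute shaders: run the forward convolution
    # of each frame with a random kernel, backpropagate the output error, and
    # update the kernel with a velocity-weighted adagrad-style step.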
    d = display(test_image_1, size=(1, 1), fps_limit=240)

    edge_kernel = -np.random.randint(
        -127, 127, (3, 3, 3, 3)).astype(dtype=np.float32) / 127.0

    kernel_height = kernel_width = 3
    kernel_stride_x = kernel_stride_y = 1

    kernel_padding_x = kernel_padding_y = 1
    kernel_dilation_x = kernel_dilation_y = 0

    first_frame = True

    width = height = None

    forward_computer = backward_computer = None

    buffkd = buffo = None

    in_channels = None
    kd_out_prev = None

    while d:
        if d.frames:
            frame = next(iter(d.frames.values()))[0].astype(np.float32) / 256.0
            noise = np.random.randint(
                0, 256, frame.shape).astype(dtype=np.float32) / 256.0
            noised_frame = frame + noise
            d.update(noised_frame, 'noised frame')

            if first_frame:
                height = frame.shape[0]
                width = frame.shape[1]
                in_channels = frame.shape[2]
                shader_file = open(
                    os.path.abspath(
                        os.sep.join([
                            '..', 'gltensors', 'shaders',
                            'dense_conv_forward_2d.glsl'
                        ])))
                forward_shader = shader_file.read()
                shader_file.close()

                shader_file = open(
                    os.path.abspath(
                        os.sep.join([
                            '..', 'gltensors', 'shaders',
                            'dense_conv_2d_backward.glsl'
                        ])))
                backward_shader = shader_file.read()
                shader_file.close()

                forward_computer = glcpu.GLSLComputer(
                    forward_shader,
                    width=width,
                    height=height,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_width=kernel_width,
                    kernel_height=kernel_height,
                    kernel_stride_x=kernel_stride_x,
                    kernel_stride_y=kernel_stride_y,
                    kernel_padding_x=kernel_padding_x,
                    kernel_padding_y=kernel_padding_y,
                    kernel_dilation_x=kernel_dilation_x,
                    kernel_dilation_y=kernel_dilation_y)

                backward_computer = glcpu.GLSLComputer(
                    backward_shader,
                    width=width,
                    height=height,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_width=kernel_width,
                    kernel_height=kernel_height,
                    kernel_stride_x=kernel_stride_x,
                    kernel_stride_y=kernel_stride_y,
                    kernel_padding_x=kernel_padding_x,
                    kernel_padding_y=kernel_padding_y,
                    kernel_dilation_x=kernel_dilation_x,
                    kernel_dilation_y=kernel_dilation_y)

                first_frame = False

            forward_computer = glcpu.GLSLComputer(
                forward_shader,
                width=width,
                height=height,
                in_channels=in_channels,
                out_channels=in_channels,
                kernel_width=kernel_width,
                kernel_height=kernel_height,
                kernel_stride_x=kernel_stride_x,
                kernel_stride_y=kernel_stride_y,
                kernel_padding_x=kernel_padding_x,
                kernel_padding_y=kernel_padding_y,
                kernel_dilation_x=kernel_dilation_x,
                kernel_dilation_y=kernel_dilation_y)

            buffk_f = forward_computer.ctx.buffer(data=edge_kernel.astype(
                dtype=np.float32).tobytes())
            buffk_f.bind_to_storage_buffer(1)

            buffi_f = forward_computer.ctx.buffer(data=frame.astype(
                dtype=np.float32).tobytes())
            buffi_f.bind_to_storage_buffer(0)

            buffo_f = forward_computer.ctx.buffer(
                data=np.zeros_like(frame).astype(dtype=np.float32).tobytes())
            buffo_f.bind_to_storage_buffer(2)

            forward_computer.cpu.run(height, width)
            forward_computer.ctx.finish()

            out_img = np.frombuffer(buffo_f.read(), dtype=np.float32).reshape(
                (height, width, in_channels))
            if np.average(out_img) < 0:
                print("out img -1")
                d.update(-out_img, 'out img')
            else:
                d.update(out_img, 'out img')

            out_error = out_img - frame

            buffk_f.release()
            buffi_f.release()
            buffo_f.release()
            forward_computer.ctx.release()

            backward_computer = glcpu.GLSLComputer(
                backward_shader,
                width=width,
                height=height,
                in_channels=in_channels,
                out_channels=in_channels,
                kernel_width=kernel_width,
                kernel_height=kernel_height,
                kernel_stride_x=kernel_stride_x,
                kernel_stride_y=kernel_stride_y,
                kernel_padding_x=kernel_padding_x,
                kernel_padding_y=kernel_padding_y,
                kernel_dilation_x=kernel_dilation_x,
                kernel_dilation_y=kernel_dilation_y)

            buffk_b = backward_computer.ctx.buffer(data=edge_kernel.astype(
                dtype=np.float32).tobytes())
            buffk_b.bind_to_storage_buffer(8)

            buffkd = backward_computer.ctx.buffer(
                data=np.zeros_like(edge_kernel).astype(
                    dtype=np.float32).tobytes())
            buffkd.bind_to_storage_buffer(5)

            buffi_b = backward_computer.ctx.buffer(
                frame.astype(dtype=np.float32).tobytes())
            buffi_b.bind_to_storage_buffer(7)

            buffid = backward_computer.ctx.buffer(
                data=np.zeros_like(frame).astype(dtype=np.float32).tobytes())
            buffid.bind_to_storage_buffer(4)

            buffod = backward_computer.ctx.buffer(data=out_error.astype(
                dtype=np.float32).tobytes())
            buffod.bind_to_storage_buffer(6)

            backward_computer.cpu.run(height, width)
            backward_computer.ctx.finish()

            ind_out = np.frombuffer(buffid.read(), dtype=np.float32).reshape(
                (height, width, in_channels))
            kd_out = np.frombuffer(buffkd.read(),
                                   dtype=np.float32).reshape(edge_kernel.shape)

            if np.average(out_error) < 0:
                print("out_error -1")
                d.update(-out_error, 'out err')
            else:
                d.update(out_error, 'out err')

            if np.average(ind_out) < 0:
                print("ind_out -1")
                d.update(-ind_out, 'in err')
            else:
                d.update(ind_out, 'in err')
            #print(kd_out)

            if kd_out_prev is not None:
                err_vel = np.abs(kd_out - kd_out_prev)
                err_vel = np.max(err_vel, 1)
            else:
                err_vel = np.ones_like(kd_out)
            kd_out_prev = kd_out

            #print(err_vel)
            d_half = (np.average(np.abs(kd_out)))

            err_1 = ((kd_out) / d_half)
            err_clip = np.abs(np.clip(err_1, -.5, .5))
            err_copy = kd_out.copy()
            err_copy[err_clip != .5] = 0

            v_half = (np.average(np.abs(err_vel)))

            err_vel_1 = ((err_vel) / v_half)
            err_vel_clip = np.abs(np.clip(err_vel_1, -.5, .5))
            err_vel[err_vel_clip != .5] = 0

            # velocity weighted adagrad.
            eta = 1.0
            eta_vel = 1.0e-1
            err_min_1 = np.where(
                err_copy != 0,
                np.divide(np.abs(edge_kernel),
                          np.sqrt(np.abs(err_copy)),
                          where=err_copy != 0), 0) * np.sign(err_copy)
            err_min_2 = np.where(
                err_vel != 0,
                np.divide(np.abs(edge_kernel),
                          np.sqrt(np.abs(err_vel)),
                          where=err_vel != 0), 0) * np.sign(err_vel)
            print(np.average(err_min_1))
            err_minus = eta * err_min_1 + err_min_2 * eta_vel
            edge_kernel -= err_minus

            #print(err_vel_1)
            buffi_b.release()
            buffk_b.release()
            buffkd.release()
            buffid.release()
            buffod.release()
            backward_computer.ctx.release()
Example #30
from displayarray.effects import crop, lens
from displayarray import display
from examples.videos import test_video
import math as m

# Move the mouse to center the image, scroll to increase/decrease barrel, ctrl+scroll to increase/decrease zoom

pre_crop_callback = crop.Crop(output_size=(480, 640, 3)).enable_mouse_control()
lens_callback = lens.BarrelPyTorch()
post_crop_callback = crop.Crop(output_size=(256, 256,
                                            3)).enable_mouse_control()

d = (display(
    0, size=(99999, 99999)).add_callback(pre_crop_callback).add_callback(
        lens_callback).add_callback(post_crop_callback).wait_for_init())

i = 0
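# Animate the crop and lens centers along sine/cosine paths each frame.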
while d:
    if len(d.frames) > 0:
        i += 1
        frame = d.frames[0]
        center_sin = [(m.sin(m.pi * (i / 70.0))), (m.cos(m.pi * (i / 120.0)))]
        pre_crop_callback.center = [
            center_sin[0] * 720 / 2 + 720 / 2,
            center_sin[1] * 1280 / 2 + 1280 / 2,
        ]
        lens_callback.center = [
            center_sin[0] * 480 / 2 + 480 / 2,
            center_sin[1] * 640 / 2 + 640 / 2,
        ]
        post_crop_callback.center = [480 / 2, 640 / 2]