Example #1
def test_colormap_coolwarm():
    """Test colormap support using coolwarm preset colormap"""
    with TestingCanvas(size=size, bgcolor='w') as c:
        idata = np.linspace(255, 0, size[0]*size[1]).astype(np.ubyte)
        data = idata.reshape((size[0], size[1]))
        image = Image(cmap='coolwarm', clim='auto', parent=c.scene)
        image.set_data(data)
        assert_image_approved(c.render(), "visuals/colormap_coolwarm.png")
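Examples #1-#6 rely on module-level test fixtures that the snippets do not show. A minimal sketch of the assumed setup follows; the import paths and the exact `size` value are assumptions, not taken from the source:

# Assumed module-level setup for the colormap tests (a sketch):
import numpy as np

from vispy.color import Colormap, get_colormap
from vispy.scene.visuals import Image
from vispy.testing import TestingCanvas
from vispy.testing.image_tester import assert_image_approved

size = (100, 100)  # shared render size; the real value is assumed here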
Example #2
def test_colormap_CubeHelix():
    """Test colormap support using cubehelix colormap in only blues"""
    with TestingCanvas(size=size, bgcolor='w') as c:
        idata = np.linspace(255, 0, size[0]*size[1]).astype(np.ubyte)
        data = idata.reshape((size[0], size[1]))
        image = Image(cmap=get_colormap('cubehelix', rot=0, start=0),
                      clim='auto', parent=c.scene)
        image.set_data(data)
        assert_image_approved(c.render(), "visuals/colormap_cubehelix.png")
Example #3
def test_colormap_single_hue():
    """Test colormap support using a single hue()"""
    with TestingCanvas(size=size, bgcolor='w') as c:
        idata = np.linspace(255, 0, size[0]*size[1]).astype(np.ubyte)
        data = idata.reshape((size[0], size[1]))
        image = Image(cmap=get_colormap('single_hue', 255),
                      clim='auto', parent=c.scene)
        image.set_data(data)
        assert_image_approved(c.render(), "visuals/colormap_hue.png")
Example #4
def test_colormap_discrete_nu():
    """Test discrete colormap with non-uniformly distributed control-points"""
    with TestingCanvas(size=size, bgcolor='w') as c:
        idata = np.linspace(255, 0, size[0]*size[1]).astype(np.ubyte)
        data = idata.reshape((size[0], size[1]))
        image = Image(cmap=Colormap(np.array([[0, .75, 0], [.75, .25, .5]]),
                      [0., .25, 1.], interpolation='zero'),
                      clim='auto', parent=c.scene)
        image.set_data(data)
        assert_image_approved(c.render(), "visuals/colormap_nu.png")
Example #5
def test_colormap_discrete():
    """Test discrete RGB colormap"""
    with TestingCanvas(size=size, bgcolor='w') as c:
        idata = np.linspace(255, 0, size[0]*size[1]).astype(np.ubyte)
        data = idata.reshape((size[0], size[1]))
        image = Image(cmap=Colormap(colors=['r', 'g', 'b'],
                      interpolation='zero'),
                      clim='auto', parent=c.scene)
        image.set_data(data)
        assert_image_approved(c.render(), "visuals/colormap_rgb.png")
Example #6
def test_colormap():
    """Test colormap support for non-uniformly distributed control-points"""
    with TestingCanvas(size=size, bgcolor='w') as c:
        idata = np.linspace(255, 0, size[0]*size[1]).astype(np.ubyte)
        data = idata.reshape((size[0], size[1]))
        image = Image(cmap=Colormap(colors=['k', 'w', 'r'],
                      controls=[0.0, 0.1, 1.0]),
                      clim='auto', parent=c.scene)
        image.set_data(data)
        assert_image_approved(c.render(), "visuals/colormap_kwr.png")
Example #7
def test_image():
    """Test image visual"""
    size = (100, 50)
    with TestingCanvas(size=size, bgcolor='w') as c:
        image = Image(cmap='grays', clim=[0, 1], parent=c.scene)
        for three_d in (True, False):
            shape = (size[1]-10, size[0]-10) + ((3,) if three_d else ())
            np.random.seed(379823)
            data = np.random.rand(*shape)
            image.set_data(data)
            assert_image_approved(c.render(), "visuals/image%s.png" %
                                  ("_rgb" if three_d else "_mono"))
Example #8
    def __init__(self, parent=None, interpolation='nearest'):
        """Init."""
        CbarBase.__init__(self)
        self._cblabel = 'Time-frequency map'
        # Rendering very large images can trigger GL bugs, so we cap the
        # number of time points :
        self._n_limits = 4000
        # Initialize image object :
        pos = np.random.rand(2, 2, 4)
        self._image = Image(parent=parent, interpolation=interpolation)
        self._image.transform = vist.STTransform()
        self._image.set_data(pos)
Example #9
def image_visual(image, parent=None):
    """
    Returns a :class:`vispy.scene.visuals.Image` class instance using given
    image.

    Parameters
    ----------
    image : array_like
        Image.
    parent : Node, optional
        Parent of the image visual in the `SceneGraph`.

    Returns
    -------
    Image
        Image visual.
    """

    image = np.clip(image, 0, 1)

    return Image(image, parent=parent)
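A minimal usage sketch for `image_visual`, assuming a standard VisPy scene; the random data and camera choice are illustrative only:

# Hypothetical usage of image_visual (a sketch):
import numpy as np
from vispy import app, scene

canvas = scene.SceneCanvas(keys='interactive', show=True)
view = canvas.central_widget.add_view()
view.camera = scene.cameras.PanZoomCamera(aspect=1)

image = image_visual(np.random.rand(256, 256, 3), parent=view.scene)
view.camera.set_range()   # frame the image

if __name__ == '__main__':
    app.run()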
Example #10
def test_image_nan_single_band(texture_format):
    size = (40, 40)
    data = np.ones((40, 40))
    data[:5, :5] = np.nan
    data[:5, -5:] = 0

    expected = (np.ones((40, 40, 4)) * 255).astype(np.uint8)
    # black square
    expected[:5, -5:, :3] = 0
    if texture_format is None:
        # CPU scaling's NaNs get converted to 0s
        expected[:5, :5, :3] = 0
    else:
        # GPU receives NaNs
        # nan - transparent square
        expected[:5, :5, 0] = 0
        expected[:5, :5, 1] = 255  # match the 'green' background
        expected[:5, :5, 2] = 0

    with TestingCanvas(size=size[::-1], bgcolor=(0, 1, 0)) as c:
        Image(data, cmap='grays',
              texture_format=texture_format, parent=c.scene)
        rendered = c.render()
        np.testing.assert_allclose(rendered, expected)
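The `texture_format` argument comes from the surrounding test framework; one plausible pytest parametrization (an assumption, not the fixture actually used upstream):

# Hypothetical parametrization for test_image_nan_single_band (a sketch):
import pytest

@pytest.mark.parametrize("texture_format", [None, "auto"])
def test_image_nan_single_band(texture_format):
    ...  # body as above: None exercises CPU scaling, "auto" the GPU path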
Example #11
def test_image_clims_and_gamma():
    """Test image visual with clims and gamma on shader."""
    size = (40, 40)
    with TestingCanvas(size=size, bgcolor="w") as c:
        for three_d in (True,):
            shape = size + ((3,) if three_d else ())
            np.random.seed(0)
            image = Image(cmap='grays', clim=[0, 1], parent=c.scene)
            data = np.random.rand(*shape)
            image.set_data(data)
            rendered = c.render()
            _dtype = rendered.dtype
            shape_ratio = rendered.shape[0] // data.shape[0]
            rendered1 = downsample(rendered, shape_ratio, axis=(0, 1)).astype(_dtype)
            predicted = _make_rgba(data)
            assert np.allclose(predicted, rendered1, atol=1)

            # adjust contrast limits
            new_clim = (0.3, 0.8)
            image.clim = new_clim
            rendered2 = downsample(c.render(), shape_ratio, axis=(0, 1)).astype(_dtype)
            scaled_data = np.clip((data - new_clim[0]) / np.diff(new_clim)[0], 0, 1)
            predicted = _make_rgba(scaled_data)
            assert np.allclose(predicted, rendered2, atol=1)
            assert not np.allclose(rendered1, rendered2, atol=10)

            # adjust gamma
            image.gamma = 2
            rendered3 = downsample(c.render(), shape_ratio, axis=(0, 1)).astype(_dtype)
            predicted = _make_rgba(scaled_data ** 2)
            assert np.allclose(
                predicted.astype(np.float64), rendered3.astype(np.float64), atol=2
            )
            assert not np.allclose(
                rendered2.astype(np.float64), rendered3.astype(np.float64), atol=10
            )
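`downsample` and `_make_rgba` are helpers from the test module and are not shown here. A rough sketch of what `_make_rgba` is assumed to do (an illustration only: expand float data in [0, 1] to opaque uint8 RGBA):

def _make_rgba(data):
    # Hypothetical helper: map float data in [0, 1] to opaque uint8 RGBA.
    data = np.asarray(data, dtype=float)
    if data.ndim == 2:                                    # mono -> RGB
        data = np.repeat(data[..., np.newaxis], 3, axis=-1)
    alpha = np.ones(data.shape[:2] + (1,))
    rgba = np.concatenate([data, alpha], axis=-1)
    return np.round(rgba * 255).astype(np.uint8)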
Example #12
class TFmapsMesh(CbarBase):
    """Visual class for time-frequency maps.

    Parameters
    ----------
    parent : VisPy parent | None
        Parent of the time-frequency image in the scene graph.
    interpolation : string | 'nearest'
        Interpolation method passed to the underlying Image visual.
    """

    def __init__(self, parent=None, interpolation='nearest'):
        """Init."""
        CbarBase.__init__(self)
        self._cblabel = 'Time-frequency map'
        # Rendering very large images can trigger GL bugs, so we cap the
        # number of time points :
        self._n_limits = 4000
        # Initialize image object :
        pos = np.random.rand(2, 2, 4)
        self._image = Image(parent=parent, interpolation=interpolation)
        self._image.transform = vist.STTransform()
        self._image.set_data(pos)

    def __len__(self):
        """Return the number of time points."""
        return self._n

    def set_data(self, data, sf, f_min=1., f_max=160., f_step=1.,
                 baseline=None, norm=3, contrast=.1, n_window=None,
                 overlap=0., window='flat', **kwargs):
        """Set data to the time frequency map.

        Parameters
        ----------
        data : array_like
            Array of data of shape (N,)
        sf : float
            The sampling frequency.
        f_min : float | 1.
            Minimum frequency.
        f_max : float | 160.
            Maximum frequency.
        f_step : float | 2.
            Frequency step between two consecutive frequencies.
        baseline : array_like | None
            Baseline period.
        norm : int | None
            The normalization method. See the `normalization` function.
        """
        # ======================= CHECKING =======================
        assert isinstance(data, np.ndarray) and data.ndim == 1
        assert isinstance(sf, (int, float))
        assert isinstance(f_min, (int, float))
        assert isinstance(f_max, (int, float))
        assert isinstance(f_step, (int, float))
        # assert isinstance(baseline)

        # ======================= PRE-ALLOCATION =======================
        self._n = len(data)
        freqs = np.arange(f_min, f_max, f_step)  # frequency vector
        time = np.arange(len(self)) / sf
        tf = np.zeros((len(freqs), len(self)), dtype=data.dtype)

        # ======================= COMPUTE TF =======================
        for i, k in enumerate(freqs):
            tf[i, :] = np.square(np.abs(morlet(data, sf, k)))

        # ======================= NORMALIZATION =======================
        normalization(tf, norm=norm, baseline=baseline, axis=1)

        # ======================= AVERAGING =======================
        if isinstance(n_window, int):
            tf = averaging(tf, n_window, axis=1, overlap=overlap,
                           window=window)

        # ======================= DOWNSAMPLE =======================
        # Downsample large images :
        if tf.shape[1] > self._n_limits:
            downsample = int(np.round(tf.shape[1] / self._n_limits))
            tf = tf[:, ::downsample]

        # ======================= CLIM // CMAP =======================
        # Get contrast (if defined) :
        self._clim = kwargs.get('clim', None)
        if isinstance(contrast, (int, float)) and (self._clim is None):
            self._clim = (tf.min() * contrast, tf.max() * contrast)
        if self._clim is None:
            self._clim = (tf.min(), tf.max())
        kwargs['clim'] = self._clim
        # Cmap :
        self._cmap = kwargs.get('cmap', 'viridis')

        # ======================= COLOR =======================
        self._image.set_data(tf)
        self._image.clim = 'auto'
        self._image.cmap = cmap_to_glsl(limits=(tf.min(), tf.max()), **kwargs)

        # ======================= SCALE // TRANSLATE =======================
        # Scale and translate TF :
        t_min, t_max = time.min(), time.max()
        fr_min, fr_max = freqs.min(), freqs.max()
        # Re-scale the mesh for fitting in time / frequency :
        fact = (fr_max - fr_min) / len(freqs)
        sc = (t_max / tf.shape[1], fact, 1)
        tr = [0., fr_min, 0.]
        self._image.transform.scale = sc
        self._image.transform.translate = tr

        # ======================= CAMERA =======================
        self.rect = (time[0], f_min, t_max - t_min, fr_max - fr_min)
        self.freqs = freqs

    def update(self):
        """Update image."""
        self._image.update()

    # ----------- VISIBLE -----------
    @property
    def visible(self):
        """Get the visible value."""
        return self._visible

    @visible.setter
    def visible(self, value):
        """Set visible value."""
        self._visible = value
        self._image.visible = value

    # ----------- INTERPOLATION -----------
    @property
    def interpolation(self):
        """Get the interpolation value."""
        return self._interpolation

    @interpolation.setter
    def interpolation(self, value):
        """Set interpolation value."""
        self._interpolation = value
        self._image.interpolation = value
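A hedged usage sketch for `TFmapsMesh`, assuming the class and its helpers (`morlet`, `normalization`, `cmap_to_glsl`, ...) are importable from the surrounding package; the synthetic signal and camera wiring are illustrative:

# Hypothetical usage of TFmapsMesh (a sketch):
import numpy as np
from vispy import app, scene

canvas = scene.SceneCanvas(keys='interactive', show=True)
view = canvas.central_widget.add_view()
view.camera = scene.cameras.PanZoomCamera()

sf = 512.                                 # sampling frequency (Hz)
t = np.arange(10 * int(sf)) / sf          # 10 s of data
signal = np.sin(2 * np.pi * 25. * t) + .5 * np.random.randn(t.size)

tfmap = TFmapsMesh(parent=view.scene)
tfmap.set_data(signal, sf, f_min=1., f_max=100., f_step=1.)
view.camera.rect = tfmap.rect             # frame the time-frequency map

if __name__ == '__main__':
    app.run()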
Example #13
    def add_image(self, image_data):
        image = Image(data=image_data, parent=self.view.scene)
        if self.view.camera:
            self.view.camera.set_range()
        return image
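A minimal sketch of the host object this method assumes; the class name and canvas wiring are hypothetical:

# Hypothetical host class for the add_image method above (a sketch):
import numpy as np
from vispy import app, scene
from vispy.scene.visuals import Image

class ImageViewer:
    def __init__(self):
        self.canvas = scene.SceneCanvas(keys='interactive', show=True)
        self.view = self.canvas.central_widget.add_view()
        self.view.camera = scene.PanZoomCamera(aspect=1)

    def add_image(self, image_data):
        # Same body as the snippet above.
        image = Image(data=image_data, parent=self.view.scene)
        if self.view.camera:
            self.view.camera.set_range()
        return image

if __name__ == '__main__':
    viewer = ImageViewer()
    viewer.add_image(np.random.rand(200, 300).astype(np.float32))
    app.run()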
Example #14
class VispyUI(object):
    """
    Display an image, rectangle and text; enable pan/zoom and keyboard interaction.
    """
    def __init__(self):
        self._make_new_random_image()
        # state
        self._last_mouse_pos = np.array([0.0, 0.0])
        self._load_data()  # Load data in after the view has been setup
        self._canvas = vispy.scene.SceneCanvas(
            keys='interactive', show=True, title="Route editor -- H for help")
        self._viewbox = self._canvas.central_widget.add_view()
        self._viewbox.camera = vispy.scene.cameras.PanZoomCamera(
            parent=self._viewbox.scene, aspect=1)
        self._add_graphics_elements()

        self._canvas.events.key_press.connect(self._on_key_press)
        self._canvas.events.mouse_move.connect(self._on_mouse_move)
        self._canvas.events.mouse_press.connect(self._on_mouse_press)
        self._canvas.events.mouse_release.connect(self._on_mouse_release)

        self._set_map_image(self._image)

    def _make_new_random_image(self):
        self._h, self._w = 100 + int(np.random.rand(1) * 100), 100 + int(
            np.random.rand(1) * 100)
        self._image = np.uint8(
            np.zeros((self._h, self._w, 3)) +
            255 * np.random.rand(self._h * self._w * 3).reshape(
                (self._h, self._w, 3)))

    def _add_graphics_elements(self):
        """
        Create all the graphics objects (VISPY objects), put them on the canvas.
        """

        # Image
        self._image_object = Image(self._image, parent=self._viewbox.scene)
        self._image_object.set_gl_state('translucent', depth_test=False)
        self._image_object.order = 1
        self._image_object.visible = True

        self._text_box_width = 150
        self._text_box_height = 40
        self._text_box_offset = 10
        # Text background box in upper-left corner
        self._text_bkg_rect = vispy.scene.visuals.Rectangle(
            [
                self._text_box_width / 2 + self._text_box_offset,
                self._text_box_height / 2 + self._text_box_offset
            ],
            color=[0.1, 0.0, 0.0, .8],
            border_color=[0.1, 0.0, 0.0],
            border_width=2,
            height=self._text_box_height,
            width=self._text_box_width,
            radius=10.0,
            parent=self._canvas.scene)
        self._text_bkg_rect.set_gl_state('translucent', depth_test=False)
        self._text_bkg_rect.visible = True
        self._text_bkg_rect.order = 2

        # Text
        self._text = "?"
        self._text_pos = [
            self._text_box_offset + 10, self._text_box_offset + 10
        ]
        self._text_obj = vispy.scene.visuals.Text(self._text,
                                                  parent=self._canvas.scene,
                                                  color=[0.9, 0.8, 0.8],
                                                  anchor_x='left',
                                                  anchor_y='top')
        self._text_obj.pos = self._text_pos
        self._text_obj.font_size = 18
        self._text_obj.visible = True
        self._text_obj.order = 3

    def _change_text(self, new_text):
        if new_text is not None:
            self._text = new_text
        else:
            self._text = ""
        self._text_obj.text = self._text

    def _load_data(self):
        """
        Download (if necessary) and load into memory the costmap, route and scrubber deck state controls.
        """
        self._images = []

    def _set_map_image(self, disp_image=None):
        """
        Extract an image to display from the current cost map.
        """
        if disp_image is not None:
            self._image = disp_image
        xmin, ymin = 0, 0
        xmax, ymax = self._image.shape[1], self._image.shape[0]
        self._viewbox.camera.set_range(x=(xmin, xmax), y=(ymin, ymax), z=None)
        self._image_object.set_data(self._image)
        self._image_object.visible = True

    def _on_key_press(self, ev):
        """
        vispy keyboard callback
        """

        if not ev or not ev.key:
            # on Mac sometimes this callback is triggered with
            # NoneType key when clicking between desktops or full screen apps
            return

        control_pressed = 'Control' in [e.name for e in ev.modifiers]
        shift_pressed = 'Shift' in [e.name for e in ev.modifiers]
        print "Shift:  %s\nControl:%s" % (shift_pressed, control_pressed)
        if ev.key.name == "Space":

            self._make_new_random_image()
            self._set_map_image()

        elif ev.key.name == "Z":
            print "Z"
        elif ev.key.name == "M":
            print "M"

        else:
            self._change_text(ev.key.name)
            logging.info("Unknown keypress:  %s" % (ev.key.name, ))

    def _on_mouse_move(self, event):
        """
        vispy mouse motion callback
        All mouse events are in map coordinate frame
        """
        self._last_mouse_pos = event.pos

    def _on_mouse_press(self, event):
        '''
        Vispy mouse button press callback.
        All mouse events are in map coordinate frame
        '''
        self._last_mouse_pos = event.pos

    def _on_mouse_release(self, event):
        """
        Vispy mouse button release callback.
        All mouse events are in map coordinate frame
        """

        self._last_mouse_pos = event.pos
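A hedged launch sketch for this `VispyUI` demo, assuming the class is importable and a standard VisPy event loop is used:

# Hypothetical entry point for the VispyUI demo above (a sketch):
import vispy.app

if __name__ == '__main__':
    ui = VispyUI()    # builds the canvas, image, text box and callbacks
    vispy.app.run()   # Space swaps in a new random image; other keys echo their name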
Example #15
    def __init__(self, **kwargs):
        super(RadolanCanvas, self).__init__(keys='interactive', **kwargs)

        # set size of Canvas
        self.size = 450, 450

        # unfreeze needed to add more elements
        self.unfreeze()

        # add grid central widget
        self.grid = self.central_widget.add_grid()

        # add view to grid
        self.view = self.grid.add_view(row=0, col=0)
        self.view.border_color = (0.5, 0.5, 0.5, 1)

        # add signal emitters
        self.mouse_moved = EventEmitter(source=self, type="mouse_moved")
        self.key_pressed = EventEmitter(source=self, type="key_pressed")

        # block double clicks
        self.events.mouse_double_click.block()

        # initialize empty RADOLAN image
        img_data = np.zeros((900, 900))

        # initialize colormap; we take cubehelix for now,
        # as it is the nicest colormap for radar data in vispy
        cmap = 'cubehelix'

        self.images = []
        # initialize Image Visual with img_data
        # add to view
        self.image = Image(
            img_data,
            method='subdivide',
            #interpolation='bicubic',
            cmap=cmap,
            clim=(0, 50),
            parent=self.view.scene)

        self.images.append(self.image)

        # add transform to Image
        # (mostly positioning within canvas)
        self.image.transform = STTransform(translate=(0, 0, 0))

        # get RADOLAN lower-left (ll) point coordinate into self.r0
        self.r0 = utils.get_radolan_origin()

        # create cities (Markers and Text visuals)
        self.create_cities()

        # create PanZoomCamera
        self.cam = PanZoomCamera(name="PanZoom",
                                 rect=Rect(0, 0, 900, 900),
                                 aspect=1,
                                 parent=self.view.scene)

        self.view.camera = self.cam

        self._mouse_position = None
        self.freeze()
        # print FPS to console, vispy SceneCanvas internal function
        self.measure_fps()
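A hedged usage sketch for `RadolanCanvas`, assuming the module's `utils` and city helpers are available; the random reflectivity field is illustrative:

# Hypothetical usage of RadolanCanvas (a sketch):
import numpy as np
from vispy import app

if __name__ == '__main__':
    canvas = RadolanCanvas(show=True)
    # Push a new 900x900 reflectivity field into the existing Image visual.
    canvas.image.set_data(
        np.random.uniform(0, 50, (900, 900)).astype(np.float32))
    canvas.update()
    app.run()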
Example #16
class VispyUI(object):
    _CUSTOM_LAYERS = {'CircleLayer': CircleLayer, 'LineLayer': LineLayer}

    # Variables possible for kwargs, use these defaults if missing from kwargs
    _DEFAULTS = {
        'n_cores': 0,
        'epochs_per_cycle': 10,
        'display_multiplier': 3,
        'border': 1.0
    }

    def __init__(self,
                 state_file=None,
                 image=None,
                 suffix=None,
                 out_dir="output",
                 **kwargs):
        # epochs_per_cycle=None, n_cores=None, display_multiplier=None, border=None):

        if int(image is None) + int(state_file is None) != 1:
            raise Exception("Need input image, or state to restore.")
        # state
        self._tempdir = tempfile.mkdtemp(prefix="imageStretch_")

        if state_file is not None:
            self._load_state(state_file)
        else:
            self._suffix = "" if suffix is None else "_%s" % (suffix, )
            self._out_dir = out_dir
            self._image = image
            n_cores = kwargs.get('n_cores', self._DEFAULTS['n_cores'])
            self._sim = ScaleInvariantImage(image, n_cores=n_cores, state=None)
            self._index = 0
            self._increment_run_number()

        # let these be overridden by command line if they're still none
        for arg in self._DEFAULTS:
            if arg in kwargs and kwargs[arg] is not None:
                setattr(self, privatize(arg), kwargs[arg])
            elif not hasattr(self, privatize(arg)):
                setattr(self, privatize(arg), self._DEFAULTS[arg])

        logging.info("Using args:")
        for arg in self._DEFAULTS:
            logging.info("\t%s: %s" % (arg, getattr(self, privatize(arg))))

        self._interactive = not kwargs['just_image']
        if kwargs['just_image']:
            self._write_image()
            return

        # set up VISPY display
        self._canvas = vispy.scene.SceneCanvas(
            keys='interactive', show=True, title="Route editor -- H for help")
        self._viewbox = self._canvas.central_widget.add_view()
        self._viewbox.camera = vispy.scene.cameras.PanZoomCamera(
            parent=self._viewbox.scene, aspect=1)
        self._add_graphics_elements()
        self.set_text_box_state(False)
        self._canvas.events.key_press.connect(self._on_key_press)
        self._canvas.events.close.connect(self._on_close)
        self._closing = False
        self._set_image(self._image / 255.0)
        self._worker = Thread(target=self._train_thread)

        self._worker.start()
        logging.info("Started worker thread.")

    def is_interactive(self):
        return self._interactive

    def _on_close(self, event):
        self._closing = True
        logging.info("Shutting down...")

    def _increment_run_number(self):
        if not os.path.exists(self._out_dir):
            os.mkdir(self._out_dir)
        files = [
            f for f in os.listdir(self._out_dir)
            if f.startswith('output') and f.endswith('.png')
        ]
        numbers = [
            int(re.search('^output_([0-9]+)_.+.png', f).groups()[0])
            for f in files
        ]
        self._run = np.max(numbers) + 1 if len(numbers) > 0 else 0

    def _save_state(self):
        logging.info("Saving state...")
        model_filename = "model_%i%s.tf" % (self._run, self._suffix)
        model_filepath = os.path.join(self._out_dir, model_filename)
        temp_filepath = os.path.join(self._tempdir, model_filename)
        self._sim.get_model().save(temp_filepath)
        with open(temp_filepath, 'rb') as infile:
            model_bin = infile.read()
        model = {
            'model_bin': model_bin,
            'epc': self._epochs_per_cycle,
            'dm': self._display_multiplier,
            'suffix': self._suffix,
            'out_dir': self._out_dir,
            'image': self._image,
            'border': self._border,
            'index': self._index,
            'run': self._run
        }

        with open(model_filepath, 'wb') as outfile:
            cp.dump(model, outfile)
        logging.info("Wrote:  %s" % (model_filepath, ))

    def _load_state(self, model_filepath):
        logging.info("loading state...")
        with open(model_filepath, 'rb') as infile:
            state = cp.load(infile)
        self._epochs_per_cycle = state['epc']
        self._display_multiplier = state['dm']
        self._suffix = state['suffix']
        self._out_dir = state['out_dir']
        self._image = state['image']
        self._index = state['index'] + 1
        self._run = state['run']
        self._border = state['border']
        model_filename = "model_%i%s.tf" % (self._run, self._suffix)
        model_filepath = os.path.join(self._tempdir, model_filename)
        with open(model_filepath, 'wb') as outfile:
            outfile.write(state['model_bin'])
        state['model'] = keras.models.load_model(
            model_filepath, custom_objects=self._CUSTOM_LAYERS)
        self._sim = ScaleInvariantImage(state=state)

    def _train_thread(self):
        tf_session, tf_graph = self._sim.get_session_and_graph()
        with tf_session.as_default():
            with tf_graph.as_default():

                while not self._closing:
                    for ep in range(self._epochs_per_cycle):
                        if self._closing:
                            break
                        self._sim.train_epochs(1)
                    self._save_state()
                    if self._closing:
                        break
                    img = self._write_image()
                    self._set_image(img)
                    self._index += 1
                    if self._closing:
                        break
        self._save_state()
        logging.info("Close detected, shutting down worker thread.")
        logging.info("Deleting work dir:  %s" % (self._tempdir, ))
        shutil.rmtree(self._tempdir)

    def _write_image(self):

        img = self._sim.get_display_image(
            np.array(self._sim.get_image().shape) * self._display_multiplier,
            margin=self._border)

        out_filename = "output_%i%s_%.8i.png" % (self._run, self._suffix,
                                                 self._index)
        out_path = os.path.join(self._out_dir, out_filename)
        while os.path.exists(out_path):
            self._index += 1
            out_filename = "output_%i%s_%.8i.png" % (self._run, self._suffix,
                                                     self._index)
            out_path = os.path.join(self._out_dir, out_filename)

        cv2.imwrite(out_path, np.uint8(255 * img[:, :, ::-1]))
        logging.info("Wrote:  %s" % (out_path, ))
        return img

    def _add_graphics_elements(self):
        """
        Create the VISPY graphics objects
        """

        # Image
        self._image_object = Image(self._image, parent=self._viewbox.scene)
        self._image_object.set_gl_state('translucent', depth_test=False)
        self._image_object.order = 1
        self._image_object.visible = True

        self._text_box_width = 150
        self._text_box_height = 40
        self._text_box_offset = 10
        # Text background box in upper-left corner
        self._text_bkg_rect = vispy.scene.visuals.Rectangle(
            [
                self._text_box_width / 2 + self._text_box_offset,
                self._text_box_height / 2 + self._text_box_offset
            ],
            color=[0.1, 0.0, 0.0, .8],
            border_color=[0.1, 0.0, 0.0],
            border_width=2,
            height=self._text_box_height,
            width=self._text_box_width,
            radius=10.0,
            parent=self._canvas.scene)
        self._text_bkg_rect.set_gl_state('translucent', depth_test=False)
        self._text_bkg_rect.visible = True
        self._text_bkg_rect.order = 2

        # Text
        self._text = "?"
        self._text_pos = [
            self._text_box_offset + 10, self._text_box_offset + 10
        ]
        self._text_obj = vispy.scene.visuals.Text(self._text,
                                                  parent=self._canvas.scene,
                                                  color=[0.9, 0.8, 0.8],
                                                  anchor_x='left',
                                                  anchor_y='top')
        self._text_obj.pos = self._text_pos
        self._text_obj.font_size = 18
        self._text_obj.visible = True
        self._text_obj.order = 3

    def set_text_box_state(self, state):
        self._text_bkg_rect.visible = state
        self._text_obj.visible = state

    def _change_text(self, new_text):
        if new_text is not None:
            self._text = new_text
        else:
            self._text = ""
        self._text_obj.text = self._text

    def _set_image(self, image=None):
        xmin, ymin = 0, 0
        xmax, ymax = image.shape[1], image.shape[0]
        self._viewbox.camera.set_range(x=(xmin, xmax), y=(ymin, ymax), z=None)
        self._image_object.set_data(flip(image))
        self._image_object.visible = True

    def _on_key_press(self, ev):
        if ev.key.name == 'Escape':
            self._canvas.close()
        else:
            self._change_text(ev.key.name)
            logging.info("Unknown keypress:  %s" % (ev.key.name, ))
Example #17
class AntSimulator(object):
    MIN_VELOCITY = 0.2
    MAX_VELOCITY = 0.8
    # MIN_VELOCITY = 0.128 # 0.0005 * self._FIELD_WIDTH #original on legacy program
    # MAX_VELOCITY = 0.256 # 0.001 * self._FIELD_WIDTH #original on legacy program
    MAX_ANGULAR_VELOCITY = 0.05 * np.pi
    SENSOR_NUM = 7
    AGENT_RADIUS = 12.8 # 0.05 * self._FIELD_WIDTH  #original on legacy program
    SENSOR_NOISE = 0.1

    def __init__(self, N, width=600, height=600, decay_rate=1.0, hormone_secretion=None):
        from PIL.Image import open as open_image
        # setup simulation
        self._N = N
        self._INITIAL_FIELD = np.array(open_image(path.join(ENV_MAP_PATH, 'envmap01.png'))).astype(np.float32) / 255.
        #self._INITIAL_FIELD = np.zeros(self._INITIAL_FIELD.shape)
        self._FIELD_WIDTH = self._INITIAL_FIELD.shape[1]
        self._FIELD_HEIGHT = self._INITIAL_FIELD.shape[0]
        self._FIELD_DECAY_RATE = decay_rate
        self._SECRATION = hormone_secretion
        sensor_th = np.linspace(0, 2*np.pi, self.SENSOR_NUM, endpoint=False)
        self._SENSOR_POSITION = self.AGENT_RADIUS * np.array([np.cos(sensor_th), np.sin(sensor_th)]).T
        self.reset()  # initialize all variables, position, velocity and field status

        # setup display
        self._canvas = SceneCanvas(size=(width, height), position=(0,0), keys='interactive', title="ALife book "+self.__class__.__name__)
        self._canvas.events.mouse_double_click.connect(self._on_mouse_double_click)
        self._view = self._canvas.central_widget.add_view()
        self._view.camera = PanZoomCamera((0, 0, self._FIELD_WIDTH, self._FIELD_HEIGHT), aspect=1)
        self._field_image = Image(self._field, interpolation='nearest', parent=self._view.scene, method='subdivide', clim=(0,1))
        self._agent_polygon = []
        for i in range(self._N):
            p = AntSimulator._generate_agent_visual_polygon(self.AGENT_RADIUS)
            p.parent = self._field_image
            self._agent_polygon.append(p)
        self._canvas.show()

    def reset(self, random_seed=None):
        np.random.seed(random_seed)
        self._field =  self._INITIAL_FIELD.copy()
        self._agents_pos = np.random.random((self._N, 2)).astype(np.float32) * self._FIELD_WIDTH
        self._agents_th = np.random.random(self._N).astype(np.float32) * np.pi * 2
        self._agents_vel = np.ones(self._N).astype(np.float32) * 0.001
        self._agents_ang_vel = (np.random.random(self._N).astype(np.float32) * 0.1 - 0.05) * np.pi
        self._agents_fitness = np.zeros(self._N)

    def get_sensor_data(self):
        sensor_data = np.empty((self._N, 7))
        for ai in range(self._N):
            th = self._agents_th[ai]
            rot_mat = np.array([[np.cos(th), -np.sin(th)],[np.sin(th), np.cos(th)]])
            for si, sensor_pos in enumerate(self._SENSOR_POSITION):
                sx, sy = rot_mat @ sensor_pos
                xi = int((sx + self._agents_pos[ai][0] + self._FIELD_WIDTH) % self._FIELD_WIDTH)
                yi = int((sy + self._agents_pos[ai][1] + self._FIELD_HEIGHT) % self._FIELD_HEIGHT)
                sensor_data[ai, si] = self._field[yi, xi] + np.random.randn() * self.SENSOR_NOISE
        return sensor_data

    def set_agent_color(self, index, color):
        self._agent_polygon[index].border_color = color

    def update(self, action):
        # action take 0-1 value
        v = action[:,0] * (self.MAX_VELOCITY - self.MIN_VELOCITY) + self.MIN_VELOCITY
        av = (action[:,1] - 0.5) * 2 * self.MAX_ANGULAR_VELOCITY
        self._agents_pos += (v * [np.cos(self._agents_th), np.sin(self._agents_th)]).T
        self._agents_th += av

        self._agents_pos[:,0] = (self._agents_pos[:,0] + self._FIELD_WIDTH) % self._FIELD_WIDTH
        self._agents_pos[:,1] = (self._agents_pos[:,1] + self._FIELD_HEIGHT) % self._FIELD_HEIGHT
        self._agents_th = (self._agents_th + 2.0 * np.pi) % (2.0 * np.pi)

        for x, y in self._agents_pos.astype(int):
            for i in range(-1, 2):
                for j in range(-1, 2):
                    xi = (x + i + self._FIELD_WIDTH) % self._FIELD_WIDTH
                    yi = (y + j + self._FIELD_HEIGHT) % self._FIELD_HEIGHT
                    self._agents_fitness += self._field[yi,xi]
                    if self._SECRATION is None:
                        #self._field[yi,xi] *= 0.5 # current running
                        self._field[yi,xi] *= 0.9 # sampledata_last2
                    else:
                        self._field[yi, xi] += self._SECRATION
        self._field = self._field.clip(0, 1)

        self._field *= self._FIELD_DECAY_RATE

        self._field_image.set_data(self._field)
        for polygon, (x, y), th in zip(self._agent_polygon, self._agents_pos, self._agents_th):
            polygon.transform.reset()
            polygon.transform.rotate(180 * th / np.pi, (0,0,1))
            polygon.transform.translate((x, y))

        self._canvas.update()
        app.process_events()

    def get_fitness(self):
        return self._agents_fitness

    def _on_mouse_double_click(self, event):
        self._view.camera.set_range(x = (0, self._FIELD_WIDTH), y = (0, self._FIELD_HEIGHT), margin=0)

    def __bool__(self):
        return not self._canvas._closed

    @staticmethod
    def _generate_agent_visual_polygon(radius):
        th = np.linspace(0, 2*np.pi, 16)
        points = np.c_[np.cos(th), np.sin(th)]
        points = np.r_[[[0,0]], points] * radius
        polygon = Polygon(points, color=(0, 0, 0, 0), border_color=(1, 0, 0), border_method='agg', border_width=2)
        #polygon = Polygon(points, color=(0, 0, 0, 0), border_color=(1, 0, 0))  # faster, but the agent outline is hard to see
        polygon.transform = MatrixTransform()
        return polygon
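A hedged driver sketch for `AntSimulator`, assuming the environment map asset is available; a random policy stands in for a real controller and the step count is arbitrary:

# Hypothetical driver loop for AntSimulator (a sketch):
import numpy as np

if __name__ == '__main__':
    N = 10
    sim = AntSimulator(N)
    for step in range(1000):
        if not sim:                        # stop once the canvas is closed
            break
        sensors = sim.get_sensor_data()    # (N, 7) field samples around each agent
        action = np.random.random((N, 2))  # col 0: speed in [0,1], col 1: turn in [0,1]
        sim.update(action)                 # a real controller would map sensors to actions
    print(sim.get_fitness())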
Example #18
class ImageSorter(object):
    """
    Display an image, rectangle and text; enable pan/zoom and keyboard interaction.
    """
    def __init__(self, image_dir, out_file=None, max_load=10):
        self._in_dir = os.path.abspath(os.path.expanduser(image_dir))
        self._out_file = out_file if out_file is not None else os.path.join(
            self._in_dir, "results.json")
        self._images = LabeledImageSet(image_dir, self._out_file,
                                       self._out_file)
        self._current_filename, self._current_index = self._images.get_unlabeled()

        # state
        self._last_mouse_pos = np.array([0.0, 0.0])
        self._canvas = vispy.scene.SceneCanvas(
            keys='interactive', show=True, title="Route editor -- H for help")
        self._viewbox = self._canvas.central_widget.add_view()
        self._viewbox.camera = vispy.scene.cameras.PanZoomCamera(
            parent=self._viewbox.scene, aspect=1)
        self._add_graphics_elements()
        self._canvas.events.key_press.connect(self._on_key_press)
        self._canvas.events.mouse_move.connect(self._on_mouse_move)
        self._canvas.events.mouse_press.connect(self._on_mouse_press)
        self._canvas.events.mouse_release.connect(self._on_mouse_release)
        self._canvas.events.resize.connect(self._on_resize)
        self._update_image()

    def _save_results(self):
        self._images.save()

    def _add_graphics_elements(self):
        """
        Create all the graphics objects (VISPY objects), put them on the canvas.
        """

        # Image
        self._image_object = Image(None, parent=self._viewbox.scene)
        self._image_object.set_gl_state('translucent', depth_test=False)
        self._image_object.order = 1
        self._image_object.visible = True

        self._text_box_width = 150
        self._text_box_height = 60
        self._text_box_offset = 10
        # Text background box in upper-left corner
        self._text_bkg_rect = vispy.scene.visuals.Rectangle(
            [
                self._text_box_width / 2 + self._text_box_offset,
                self._text_box_height / 2 + self._text_box_offset
            ],
            color=[0.1, 0.0, 0.0, .8],
            border_color=[0.1, 0.0, 0.0],
            border_width=2,
            height=self._text_box_height,
            width=self._text_box_width,
            radius=10.0,
            parent=self._canvas.scene)
        self._text_bkg_rect.set_gl_state('translucent', depth_test=False)
        self._text_bkg_rect.visible = True
        self._text_bkg_rect.order = 2
        self._resize_text_bkg_box()

        # Text
        self._font1_size = 10
        self._font2_size = 18
        self._vspace = self._font1_size * 2.2
        self._text_pos = [
            self._text_box_offset + 10, self._text_box_offset + 10
        ]
        self._text_obj = vispy.scene.visuals.Text("",
                                                  parent=self._canvas.scene,
                                                  color=[0.9, 0.8, 0.8],
                                                  anchor_x='left',
                                                  anchor_y='top')
        self._text_obj.pos = self._text_pos
        self._text_obj.font_size = self._font1_size
        self._text_obj.visible = True
        self._text_obj.order = 3

        self._text2_pos = [self._text_pos[0], self._vspace + self._text_pos[1]]
        self._text2_obj = vispy.scene.visuals.Text("",
                                                   parent=self._canvas.scene,
                                                   color=[0.9, 0.8, 0.8],
                                                   anchor_x='left',
                                                   anchor_y='top')
        self._text2_obj.pos = self._text2_pos
        self._text2_obj.font_size = self._font2_size
        self._text2_obj.visible = True
        self._text2_obj.order = 3

    def _resize_text_bkg_box(self):
        logging.info('resize')
        self._text_bkg_rect.center = np.array([
            self._canvas.size[0] / 2,
            self._text_box_height / 2 + self._text_box_offset
        ])
        self._text_bkg_rect.width = (
            self._canvas.size[0] - 2 * self._text_box_offset)

    def _on_resize(self, event):
        self._resize_text_bkg_box()

    def _update_image(self):
        """
        Load the image at the current index and display it.
        """
        self._current_filename = self._images.get_i(self._current_index)
        self._current_image = self._images.get_image(self._current_filename)

        logging.info("Got new image:  %s, %s" %
                     (self._current_image.shape, self._current_filename))
        xmin, ymin = 0, 0
        xmax, ymax = self._current_image.shape[1], self._current_image.shape[0]
        self._viewbox.camera.set_range(x=(xmin, xmax), y=(ymin, ymax), z=None)
        self._image_object.set_data(self._current_image)
        self._image_object.visible = True
        self._update_text()

    def _update_text(self):
        result = self._images.get_label(self._current_filename)
        result_str = "%i" % (result, ) if result is not None else "(unlabeled)"

        text1 = self._current_filename
        text2 = "%i / %i - label:  %s" % (self._current_index,
                                          self._images.get_n(), result_str)
        self._text_obj.text = text1
        self._text2_obj.text = text2

        self._text_obj.visible = True
        self._text2_obj.visible = True

    def _on_key_press(self, ev):
        """
        vispy keyboard callback
        """

        if not ev or not ev.key:
            # on Mac sometimes this callback is triggered with
            # NoneType key when clicking between desktops or full screen apps
            return

        # control_pressed = 'Control' in [e.name for e in ev.modifiers]
        shift_pressed = 'Shift' in [e.name for e in ev.modifiers]
        # print "Shift:  %s\nControl:%s" % (shift_pressed, control_pressed)

        if ev.key.name == "Left":
            self._advance_index(-1)

        elif ev.key.name == "Right":
            self._advance_index(1)

        elif ev.key.name == "X":
            self._classify_and_go_to_next(
                0, skip_to_next_unlabeled=not shift_pressed)

        elif ev.key.name == "O":
            self._classify_and_go_to_next(
                1, skip_to_next_unlabeled=not shift_pressed)

        else:
            logging.info("Unknown keypress:  %s" % (ev.key.name, ))

    def _advance_index(self, direction):
        new_index = self._current_index + direction
        if new_index >= 0 and new_index <= self._images.get_n() - 1:
            self._current_index = new_index
        self._update_image()
        self._update_text()

    def _classify_and_go_to_next(self, label, skip_to_next_unlabeled=True):
        self._images.apply_label(self._current_filename, label)
        self._images.save()
        if skip_to_next_unlabeled:
            self._current_filename, self._current_index = self._images.get_unlabeled()
            self._update_image()
            self._update_text()
        else:
            self._advance_index(1)

    def _on_mouse_move(self, event):
        self._last_mouse_pos = event.pos

    def _on_mouse_press(self, event):
        self._last_mouse_pos = event.pos

    def _on_mouse_release(self, event):
        self._last_mouse_pos = event.pos
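A hedged launch sketch for `ImageSorter`; the directory path and event-loop call are illustrative:

# Hypothetical entry point for ImageSorter (a sketch):
import vispy.app

if __name__ == '__main__':
    sorter = ImageSorter("~/images_to_label")  # labels are written to results.json
    vispy.app.run()                            # X / O label the image, arrow keys browse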