Example #1
    def _images_thumbnails(self):
        from vispy.io import imsave, imread
        from skimage.transform import resize
        import numpy as np
        gallery_dir = op.join(IMAGES_DIR, 'gallery')
        thumbs_dir = op.join(IMAGES_DIR, 'thumbs')
        carousel_dir = op.join(IMAGES_DIR, 'carousel')
        for fname in os.listdir(gallery_dir):
            filename1 = op.join(gallery_dir, fname)
            filename2 = op.join(thumbs_dir, fname)
            filename3 = op.join(carousel_dir, fname)
            #
            im = imread(filename1)

            newx = 200
            newy = int(newx * im.shape[0] / im.shape[1])
            im = (resize(im, (newy, newx), 2) * 255).astype(np.uint8)
            imsave(filename2, im)

            newy = 160  # This should match the carousel size!
            newx = int(newy * im.shape[1] / im.shape[0])
            im = (resize(im, (newy, newx), 1) * 255).astype(np.uint8)
            imsave(filename3, im)

            print('Created thumbnail and carousel %s' % fname)
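The thumbnail arithmetic above (fixed width, height derived from the aspect ratio) can be read in isolation; a minimal sketch of just that step, assuming skimage is available (the helper name and default width are illustrative):

import numpy as np
from skimage.transform import resize

def make_thumbnail(im, new_width=200, order=2):
    """Resize `im` to `new_width` pixels wide, preserving the aspect ratio."""
    new_height = int(new_width * im.shape[0] / im.shape[1])
    small = resize(im, (new_height, new_width), order=order)
    # resize() returns floats in [0, 1]; convert back to 8-bit for imsave().
    return (small * 255).astype(np.uint8)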
Example #2
    def _images_thumbnails(self):
        from vispy.io import imsave, imread
        # TODO: Switch to using PIL for resizing
        from skimage.transform import resize
        import numpy as np
        gallery_dir = op.join(IMAGES_DIR, 'gallery')
        thumbs_dir = op.join(IMAGES_DIR, 'thumbs')
        carousel_dir = op.join(IMAGES_DIR, 'carousel')
        for fname in os.listdir(gallery_dir):
            filename1 = op.join(gallery_dir, fname)
            filename2 = op.join(thumbs_dir, fname)
            filename3 = op.join(carousel_dir, fname)
            #
            im = imread(filename1)

            newx = 200
            newy = int(newx * im.shape[0] / im.shape[1])
            im = (resize(im, (newy, newx), 2) * 255).astype(np.uint8)
            imsave(filename2, im)

            newy = 160  # This should match the carousel size!
            newx = int(newy * im.shape[1] / im.shape[0])
            im = (resize(im, (newy, newx), 1) * 255).astype(np.uint8)
            imsave(filename3, im)

            print('Created thumbnail and carousel %s' % fname)
Example #3
def test_read_write_image():
    """Test reading and writing of images"""
    fname = op.join(temp_dir, 'out.png')
    im1 = load_crate()
    imsave(fname, im1, format='png')
    with warnings.catch_warnings(record=True):  # PIL unclosed file
        im2 = imread(fname)
    assert_allclose(im1, im2)
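A self-contained round-trip along the same lines, assuming only a writable temporary directory (the array contents are arbitrary):

import os
import tempfile

import numpy as np
from numpy.testing import assert_allclose
from vispy.io import imread, imsave

im1 = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)
fname = os.path.join(tempfile.mkdtemp(), 'out.png')
imsave(fname, im1, format='png')
im2 = imread(fname)
assert_allclose(im1, im2)  # PNG is lossless, so the pixels survive unchanged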
Example #4
def show_mol_surf(mol_obj=None,
                  surf_datas=None,
                  l_par=None,
                  reduced_mol=False,
                  **kwargs):
    canvas = scene.SceneCanvas(keys='interactive', bgcolor=(1, 1, 1, 1))
    view = canvas.central_widget.add_view()

    num = None
    if surf_datas is not None:
        if isinstance(surf_datas, tuple):
            colors = ColorIter(len(surf_datas))
            for data in surf_datas:
                if num is not None and len(data) != num:
                    raise TypeError("different dimensions of surfaces")
                num = len(data)
                build_surface(data=data, view=view, color=next(colors))
        else:
            build_surface(data=surf_datas, view=view, color=next(ColorIter(1)))
            num = len(surf_datas)

    if mol_obj is not None:
        mol = mol_obj.copy()
        if surf_datas is not None:
            if l_par is not None:
                mol.pos /= l_par
                mol.pos *= num
        vis_mol_wrap(mol, view=view, reduced_mol=reduced_mol)

    if num is None:
        num = 50
    sr = kwargs.get('setrange', num)
    cam = scene.TurntableCamera(elevation=30, azimuth=30)
    if kwargs.get('show_axis', True):
        axis = myXYZAxis(length=num + 10, parent=view.scene)
    cam.set_range((-sr, sr), (-sr, sr), (-sr, sr))
    view.camera = cam
    cube_size = kwargs.get('cube_size', 0.)
    scene.visuals.Cube(size=cube_size * num / 2,
                       color=(0.9, 0.9, 0.3, 0.4),
                       edge_color="black",
                       parent=view.scene)
    canvas.show()

    if kwargs.get('quit', False):
        app.process_events()
    else:
        app.run()

    if kwargs.get('screenshot', False):
        name = kwargs.get("screenname", 'screenshot.png')
        im = _screenshot((0, 0, canvas.size[0], canvas.size[1]))
        imsave(name, im)
Example #5
 def _images_thumbnails(self):
     from vispy.io import imsave, imread
     from skimage.transform import resize
     import numpy as np
     gallery_dir = os.path.join(IMAGES_DIR, 'gallery')
     thumbs_dir = os.path.join(IMAGES_DIR, 'thumbs')
     for fname in os.listdir(gallery_dir):
         filename1 = os.path.join(gallery_dir, fname)
         filename2 = os.path.join(thumbs_dir, fname)
         #
         im = imread(filename1)
         newx = 200
         newy = int(newx * im.shape[0] / im.shape[1])
         im = (resize(im, (newy, newx), 2) * 255).astype(np.uint8)
         imsave(filename2, im)
         print('Created thumbnail %s' % fname)
Example #6
        def __screenshot(Canvas, saveas):
            # Get a copy of the actual canvas physical size :
            backp_size = Canvas.canvas.physical_size
            # Increase the physical size :
            ratio = max(6000/backp_size[0], 3000/backp_size[1])
            new_size = (int(backp_size[0]*ratio), int(backp_size[1]*ratio))
            Canvas.canvas._backend._physical_size = new_size

            # Render and save :
            img = Canvas.canvas.render(region=self._crop)
            io.imsave(saveas, img)

            # Set to the canvas it's previous size :
            Canvas.canvas._backend._physical_size = backp_size
            # Update the canvas :
            Canvas.canvas.update()
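Newer vispy releases expose a `size` argument on `SceneCanvas.render()`, which can produce a high-resolution capture without touching the private `_physical_size` attribute used above; a hedged sketch under that assumption (the target resolution mirrors the 6000 x 3000 values in the snippet):

from vispy.io import imsave

def save_highres(canvas, saveas, target=(6000, 3000)):
    # Scale the offscreen render so it is at least `target` in each dimension,
    # preserving the canvas aspect ratio.
    w, h = canvas.physical_size
    ratio = max(target[0] / w, target[1] / h)
    img = canvas.render(size=(int(w * ratio), int(h * ratio)))
    imsave(saveas, img)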
Example #7
    def on_timer(self, event):
        self.galaxy.iterate()

        if plot_traj:
            self.line1.add_point(tuple(
                self.galaxy.positions[self.traj1_id, :]))
            self.line2.add_point(tuple(
                self.galaxy.positions[self.traj2_id, :]))

        self.update()
        self._draw_scene()

        if render_mode:
            img = gloo.util._screenshot()
            filename = 'img_{:04d}.png'.format(self.iteration)
            print('Rendering img {}/{}'.format(self.iteration + 1, n_frames))
            io.imsave(os.path.join(self.dest, filename), img[:, :, :3])
            self.iteration += 1
            if self.iteration == n_frames:
                exit(0)
Example #8
def mapping_and_tresholding(predictions, patches_labels_dict_cat,
                            list_train_info, list_train_patches_info, count):

    h0 = []
    h1 = []
    h2 = []

    j = 0
    m = 0

    # prepare lists of probabilities for each class
    for key in list_train_info:
        patches_len = list_train_info.get(key).get('patches')
        patches_label = list_train_info.get(key).get('label')
        for i in range(patches_len):
            if patches_label == 0:
                h0.append(predictions[0][j][0])
                h0.append(predictions[1][j][0])
                h0.append(predictions[2][j][0])
            elif patches_label == 1:
                h1.append(predictions[0][j][0])
                h1.append(predictions[1][j][0])
                h1.append(predictions[2][j][0])
            else:
                h2.append(predictions[0][j][0])
                h2.append(predictions[1][j][0])
                h2.append(predictions[2][j][0])
            j = j + 1

    # sorting and getting percentiles
    h0.sort()
    h1.sort()
    h2.sort()
    thresh0 = np.percentile(h0, 70)
    thresh1 = np.percentile(h1, 70)
    thresh2 = np.percentile(h2, 70)

    # creating probability maps for each biopsy based on predicted probabilities

    j = 0

    for key in list_train_info:

        patches_len = list_train_info.get(key).get('patches')
        height = list_train_info.get(key).get('height')
        width = list_train_info.get(key).get('width')
        patches_label = list_train_info.get(key).get('label')
        ar0 = np.zeros(height * width)
        ar_im0 = []
        ar1 = np.zeros(height * width)
        ar_im1 = []
        ar2 = np.zeros(height * width)
        ar_im2 = []
        for i in range(patches_len):
            index = list_train_patches_info[j].get('patch_index')
            id = list_train_patches_info[j].get('patch_id')
            ar0[index] = predictions[0][j][0]
            ar_im0.append(predictions[0][j][0])
            ar1[index] = predictions[1][j][0]
            ar_im1.append(predictions[1][j][0])
            ar2[index] = predictions[2][j][0]
            ar_im2.append(predictions[2][j][0])
            j = j + 1

        ar_im0.sort()
        thresh_im0 = np.percentile(ar_im0, 70)
        ar_im1.sort()
        thresh_im1 = np.percentile(ar_im1, 70)
        ar_im2.sort()
        thresh_im2 = np.percentile(ar_im2, 70)

        ar0 = ar0.reshape(height, width)
        gaussian0 = ndimage.gaussian_filter(ar0, sigma=1, order=0)
        gaussian0 = gaussian0.reshape((height * width))

        ar1 = ar1.reshape(height, width)
        gaussian1 = ndimage.gaussian_filter(ar1, sigma=1, order=0)
        gaussian1 = gaussian1.reshape((height * width))

        ar2 = ar2.reshape(height, width)
        gaussian2 = ndimage.gaussian_filter(ar2, sigma=1, order=0)
        gaussian2 = gaussian2.reshape((height * width))

        color = np.zeros((height * width, 3))

        for l in range(patches_len):
            index = list_train_patches_info[m].get('patch_index')
            id = list_train_patches_info[m].get('patch_id')

            if patches_label == 0:
                thresh = min(thresh0, thresh_im0)
                current_label = patches_labels_dict_cat.get(id)[0]

                if (gaussian0[index] <= thresh and
                    (gaussian1[index] <= 0.5 and gaussian2[index] <= 0.5)):
                    non_discr_0 = True
                else:
                    non_discr_0 = False

                if non_discr_0 == True:
                    if current_label == 1:
                        patches_labels_dict_cat.get(id)[0] = 0
                    color[index][0] = 255
                    color[index][1] = 255

                else:
                    if current_label == 0:
                        patches_labels_dict_cat.get(id)[0] = 1
                    color[index][0] = 255

            elif patches_label == 1:
                thresh = min(thresh1, thresh_im1)
                current_label = patches_labels_dict_cat.get(id)[1]
                if (gaussian1[index] <= thresh and
                    (gaussian0[index] <= 0.5 and gaussian2[index] <= 0.5)):
                    non_discr_0 = True
                else:
                    non_discr_0 = False

                if non_discr_0 == True:
                    if current_label == 1:
                        patches_labels_dict_cat.get(id)[1] = 0
                    color[index][0] = 255
                    color[index][1] = 255

                else:
                    if current_label == 0:
                        patches_labels_dict_cat.get(id)[1] = 1
                    color[index][1] = 255

            else:
                thresh = min(thresh2, thresh_im2)
                current_label = patches_labels_dict_cat.get(id)[2]
                if (gaussian2[index] <= thresh and
                    (gaussian0[index] <= 0.5 and gaussian1[index] <= 0.5)):
                    non_discr_0 = True
                else:
                    non_discr_0 = False

                if non_discr_0 == True:
                    if current_label == 1:
                        patches_labels_dict_cat.get(id)[2] = 0
                    color[index][0] = 255
                    color[index][1] = 255

                else:
                    if current_label == 0:
                        patches_labels_dict_cat.get(id)[2] = 1
                    color[index][2] = 255

            m = m + 1

        color = color.reshape((height, width, 3))
        filename = 'maps/' + str(key) + '_color_map_label_' + str(
            patches_label) + '_iter_' + str(count) + '.png'
        imsave(filename, color)

    return
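The relabeling rule above reduces to comparing a Gaussian-smoothed probability map against the smaller of a global and a per-image 70th percentile, while requiring the competing class maps to stay below 0.5. A rough standalone sketch of that decision (function and argument names are illustrative, and the per-image percentile here is taken over the map rather than the raw prediction list):

import numpy as np
from scipy import ndimage

def non_discriminative_mask(prob_map, global_thresh, other_maps, sigma=1):
    """True where the smoothed probability is below both thresholds and the
    competing class maps are low."""
    smoothed = ndimage.gaussian_filter(prob_map, sigma=sigma, order=0)
    thresh = min(global_thresh, np.percentile(prob_map, 70))
    others_low = np.all([m <= 0.5 for m in other_maps], axis=0)
    return (smoothed <= thresh) & others_low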
Example #9
    def _images_screenshots(self):
        # Prepare
        import imp
        from vispy.io import imsave
        from vispy.gloo.util import _screenshot
        import numpy as np
        examples_dir = op.join(ROOT_DIR, 'examples')
        gallery_dir = op.join(IMAGES_DIR, 'gallery')

        # Process all files ...
        for filename, name in get_example_filenames(examples_dir):
            name = name.replace('/', '__')  # We use flat names
            imagefilename = op.join(gallery_dir, name + '.png')

            # Check if we need to take a screenshot
            if op.isfile(imagefilename):
                print('Skip:   %s screenshot already present.' % name)
                continue

            # Check if we should make a screenshot
            frames = []
            lines = open(filename, 'rb').read().decode('utf-8').splitlines()
            for line in lines[:10]:
                if line.startswith('# vispy:') and 'gallery' in line:
                    # Get what frames to grab
                    frames = line.split('gallery')[1].split(',')[0].strip()
                    frames = frames or '0'
                    frames = [int(i) for i in frames.split(':')]
                    if not frames:
                        frames = [0]
                    if len(frames) > 1:
                        frames = list(range(*frames))
                    break
            else:
                print('Ignore: %s, no hint' % name)
                continue  # gallery hint not found

            # Import module and prepare
            print('Grab:   %s screenshots (%s)' % (name, len(frames)))
            try:
                m = imp.load_source('vispy_example_' + name, filename)
            except Exception as exp:
                print('*Err*:  %s, got "%s"' % (name, str(exp)))
                continue
            m.done = False
            m.frame = -1
            m.images = []

            # Create a canvas and grab a screenshot
            def grabscreenshot(event):
                if m.done:
                    return  # Grab only once
                m.frame += 1
                if m.frame in frames:
                    frames.remove(m.frame)
                    im = _screenshot((0, 0, c.size[0], c.size[1]))
                    # Ensure we don't have alpha silliness
                    im = np.array(im)
                    im[:, :, 3] = 255
                    m.images.append(im)
                if not frames:
                    m.done = True
            # Get canvas
            if hasattr(m, 'canvas'):
                c = m.canvas  # scene examples
            elif hasattr(m, 'Canvas'):
                c = m.Canvas()
            elif hasattr(m, 'fig'):
                c = m.fig
            else:
                print('Ignore: %s, no canvas' % name)
                continue
            c.events.draw.connect(grabscreenshot)
            # Show it and draw as many frames as needed
            with c:
                n = 0
                limit = 10000
                while not m.done and n < limit:
                    c.update()
                    c.app.process_events()
                    n += 1
                if n >= limit or len(frames) > 0:
                    raise RuntimeError('Could not collect image for %s' % name)
            # Save
            imsave(imagefilename, m.images[0])  # Always save one image
            if len(m.images) > 1:
                import imageio  # multiple gif not properly supported yet
                imageio.mimsave(imagefilename[:-3] + '.gif', m.images)
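The gallery hint parsing in these screenshot builders turns a spec such as `0:10` into a list of frame indices; a small standalone sketch of that logic (the helper name is illustrative):

def parse_gallery_frames(line):
    """Parse a '# vispy: gallery ...' hint into the frame indices to grab."""
    spec = line.split('gallery')[1].split(',')[0].strip() or '0'
    frames = [int(i) for i in spec.split(':')]
    if len(frames) > 1:
        frames = list(range(*frames))  # e.g. '5:30' -> [5, 6, ..., 29]
    return frames

# parse_gallery_frames('# vispy: gallery 2')    -> [2]
# parse_gallery_frames('# vispy: gallery 0:10') -> [0, 1, ..., 9]
# parse_gallery_frames('# vispy: gallery')      -> [0]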
Example #10
 def save_screenshot(self):
     img = gloo.util._screenshot((0, 0, self.size[0], self.size[1]))
     imsave("capture.png", img)
Example #11
def write_fig_canvas(filename, canvas, widget=None, autocrop=False,
                     region=None, print_size=None, unit='centimeter', dpi=300.,
                     factor=1., bgcolor=None, transparent=False):
    """Export a canvas as a figure.

    Parameters
    ----------
    filename : string
        Name of the figure to export.
    canvas : VisPy canvas
        The vispy canvas to export.
    widget : PyQt widget | None
        The widget parent of the canvas.
    autocrop : bool | False
        Auto-cropping argument to remove useless space.
    region : tuple/list | None
        The region to export (x_start, y_start, width, height).
    print_size : tuple | None
        The desired print size. This argument should be used together with
        the dpi and unit inputs. print_size should be a tuple of two floats
        describing the (width, height) of the exported image at the given
        dpi. The final image might not have exactly the desired size;
        instead, a compromise is found that respects the width/height
        proportions of the original image.
    unit : {'centimeter', 'millimeter', 'pixel', 'inch'}
        Unit of the printed size.
    dpi : float | 300.
        Dots per inch for printing the image.
    factor : float | None
        If you don't want to use the print_size input, factor simply
        multiplies the resolution of your screen.
    bgcolor : array_like/string | None
        Background color of the canvas.
    transparent : bool | False
        Use transparent background.
    """
    from ..utils import piccrop
    from vispy.io import imsave

    # Get the size of the canvas and backend :
    c_size = canvas.size
    b_size = canvas._backend._physical_size

    # If the GUI is displayed, c_size and b_size should be equal. If not, and
    # if the canvas is resizable, the canvas might have a different size
    # because it hasn't been updated. In that case, we force the canvas to
    # have the same size as the backend :
    if c_size != b_size:
        canvas.size = b_size

    # Backup size / background color :
    backup_size = canvas.physical_size
    backup_bgcolor = canvas.bgcolor

    # dpi checking :
    if print_size is None:
        logger.warning("dpi parameter is not active if `print_size` is None. "
                       "Use for example `print_size=(5, 5)`")

    # The user selects a desired print size at a specific dpi :
    if print_size is not None:
        # Type checking :
        if not isinstance(print_size, (tuple, list)):
            raise TypeError("The print_size must either be a tuple or a list "
                            "describing the (width, height) of the"
                            " image in %s" % unit)
        # Check print size :
        if not all([isinstance(k, (int, float)) for k in print_size]):
            raise TypeError("print_size must be a tuple describing the "
                            "(width, height) of the image in %s" % unit)

        print_size = np.asarray(print_size)
        # If the user selects the auto-cropping option, the canvas must be
        # rendered first :
        if autocrop:
            img = canvas.render()
            s_output = piccrop(img)[:, :, 0].shape
            logger.info("Image cropped to closest non-backround pixels")
        else:
            s_output = b_size
        # Unit conversion :
        if unit == 'millimeter':
            mult = 1. / (10. * 2.54)
        elif unit == 'centimeter':
            mult = 1. / 2.54
        elif unit == 'pixel':
            mult = 1. / dpi
        elif unit == 'inch':
            mult = 1.
        else:
            raise ValueError("The unit must either be 'millimeter', "
                             "'centimeter', 'pixel' or 'inch' and not " + unit)
        # Get the factor to apply to the canvas size. This factor is defined
        # as the mean ratio required to reach the desired width/height.
        # Note that the min or the max could be used instead.
        factor = np.mean(print_size * dpi * mult / np.asarray(s_output))

    # Multiply the original canvas size :
    if factor is not None:
        # Get the new width and height :
        new_width = int(b_size[0] * factor)
        new_height = int(b_size[1] * factor)
        # Set it to the canvas, backend and the widget :
        canvas._backend._vispy_set_physical_size(new_width, new_height)
        canvas.size = (new_width, new_height)
        if widget is not None:
            widget.size = (new_width, new_height)

    # Background color and transparency :
    if bgcolor is not None:
        canvas.bgcolor = color2vb(bgcolor, alpha=1.)
    if transparent:
        canvas.bgcolor = [0.] * 4

    # Render the canvas :
    try:
        img = canvas.render(region=region)
    except Exception:
        raise ValueError("Cannot render the canvas. Try decreasing the "
                         "resolution")

    # Restore the canvas's previous size :
    canvas._backend._physical_size = backup_size
    canvas.size = backup_size
    canvas.bgcolor = backup_bgcolor

    # Matplotlib render :
    if CONFIG['MPL_RENDER'] or not isinstance(filename, str):
        return img

    # Remove alpha for files that are not png or tiff :
    if os.path.splitext(filename)[1] not in ['.png', '.tiff']:
        img = img[..., 0:-1]

    # Apply auto-cropping to the image :
    if autocrop:
        img = piccrop(img)
        logger.info("Image cropped to closest non-backround pixels")
    # Save it :
    imsave(filename, img)
    px = tuple(img[:, :, 0].T.shape)
    logger.info("Image of size %rpx successfully saved (%s)" % (px, filename))
Example #12
 def _images_screenshots(self):
     # Prepare
     import imp
     from vispy.io import imsave, _screenshot
     examples_dir = os.path.join(ROOT_DIR, 'examples')
     gallery_dir = os.path.join(IMAGES_DIR, 'gallery')
     
     # Process all files ...
     for filename, name in get_example_filenames(examples_dir):
         name = name.replace('/', '__')  # We use flat names
         imagefilename = os.path.join(gallery_dir, name+'.png')
         
         # Check if should make a screenshot
         frames = []
         lines = open(filename, 'rt').read().splitlines()
         for line in lines[:10]:
             if line.startswith('# vispy:') and 'gallery' in line:
                 # Get what frames to grab
                 frames = line.split('gallery')[1].strip()
                 frames = frames or '0'
                 frames = [int(i) for i in frames.split(':')]
                 if not frames:
                     frames = [0]
                 if len(frames) > 1:
                     frames = list(range(*frames))
                 break
         else:
             continue  # gallery hint not found
         
         # Check if we need to take a screenshot
         if os.path.isfile(imagefilename):
             print('Screenshot for %s already present (skip).' % name)
             continue
         
         # Import module and prepare
         m = imp.load_source('vispy_example_'+name, filename)
         m.done = False
         m.frame = -1
         m.images = []
         
         # Create a canvas and grab a screenshot
         def grabscreenshot(event):
             if m.done: return  # Grab only once
             m.frame += 1
             if m.frame in frames:
                 frames.remove(m.frame)
                 print('Grabbing a screenshot for %s' % name)
                 im = _screenshot((0, 0, c.size[0], c.size[1]))
                 m.images.append(im)
             if not frames:
                 m.done = True
         c = m.Canvas()
         c.events.paint.connect(grabscreenshot)
         c.show()
         while not m.done:
             m.app.process_events()
         c.close()
         
         # Save
         imsave(imagefilename, m.images[0])  # Always save one image
         if len(m.images) > 1:
             import imageio  # multiple gif not properly supported yet
             imageio.mimsave(imagefilename[:-3]+'.gif', m.images)
Example #13
def write_fig_canvas(filename,
                     canvas,
                     widget=None,
                     autocrop=False,
                     region=None,
                     print_size=None,
                     unit='centimeter',
                     dpi=300.,
                     factor=1.,
                     bgcolor=None,
                     transparent=False):
    """Export a canvas as a figure.

    Parameters
    ----------
    filename : string
        Name of the figure to export.
    canvas : VisPy canvas
        The vispy canvas to export.
    widget : PyQt widget | None
        The widget parent of the canvas.
    autocrop : bool | False
        Auto-cropping argument to remove useless space.
    region : tuple/list | None
        The region to export (x_start, y_start, width, height).
    print_size : tuple | None
        The desired print size. This argument should be used together with
        the dpi and unit inputs. print_size should be a tuple of two floats
        describing the (width, height) of the exported image at the given
        dpi. The final image might not have exactly the desired size;
        instead, a compromise is found that respects the width/height
        proportions of the original image.
    unit : {'centimeter', 'millimeter', 'pixel', 'inch'}
        Unit of the printed size.
    dpi : float | 300.
        Dots per inch for printing the image.
    factor : float | None
        If you don't want to use the print_size input, factor simply
        multiplies the resolution of your screen.
    bgcolor : array_like/string | None
        Background color of the canvas.
    transparent : bool | False
        Use transparent background.
    """
    from ..utils import piccrop
    from vispy.io import imsave

    # Get the size of the canvas and backend :
    c_size = canvas.size
    b_size = canvas._backend._physical_size

    # If the GUI is displayed, c_size and b_size should be equal. If not, and
    # if the canvas is resizable, the canvas might have a different size
    # because it hasn't been updated. In that case, we force the canvas to
    # have the same size as the backend :
    if c_size != b_size:
        canvas.size = b_size

    # Backup size / background color :
    backup_size = canvas.physical_size
    backup_bgcolor = canvas.bgcolor

    # The user selects a desired print size at a specific dpi :
    if print_size is not None:
        # Type checking :
        if not isinstance(print_size, (tuple, list, np.ndarray)):
            raise TypeError("The print_size must either be a tuple, list or a "
                            "NumPy array describing the (width, height) of the"
                            " image in " + unit)
        # Check print size :
        if not all([isinstance(k, (int, float)) for k in print_size]):
            raise TypeError("print_size must be a tuple describing the "
                            "(width, height) of the image in " + unit)
        print_size = np.asarray(print_size)
        # If the user selects the auto-cropping option, the canvas must be
        # rendered first :
        if autocrop:
            img = canvas.render()
            s_output = piccrop(img)[:, :, 0].shape
        else:
            s_output = b_size
        # Unit conversion :
        if unit == 'millimeter':
            mult = 1. / (10. * 2.54)
        elif unit == 'centimeter':
            mult = 1. / 2.54
        elif unit == 'pixel':
            mult = 1. / dpi
        elif unit == 'inch':
            mult = 1.
        else:
            raise ValueError("The unit must either be 'millimeter', "
                             "'centimeter', 'pixel' or 'inch' and not " + unit)
        # Get the factor to apply to the canvas size. This factor is defined
        # as the mean ratio required to reach the desired width/height.
        # Note that the min or the max could be used instead.
        factor = np.mean(print_size * dpi * mult / np.asarray(s_output))

    # Multiply the original canvas size :
    if factor is not None:
        # Get the new width and height :
        new_width = int(b_size[0] * factor)
        new_height = int(b_size[1] * factor)
        # Set it to the canvas, backend and the widget :
        canvas._backend._vispy_set_physical_size(new_width, new_height)
        canvas.size = (new_width, new_height)
        if widget is not None:
            widget.size = (new_width, new_height)

    # Don't use transparency for jpg files :
    # transparent = transparent if splitext(filename)[1] != '.jpg' else False
    # Background color and transparency :
    if bgcolor is not None:
        canvas.bgcolor = color2vb(bgcolor, alpha=1.)
    if transparent:
        canvas.bgcolor = [0.] * 4

    # Render the canvas :
    try:
        img = canvas.render(region=region)
    except Exception:
        raise ValueError("Cannot render the canvas. Try decreasing the "
                         "resolution")

    # Apply auto-cropping to the image :
    if autocrop:
        img = piccrop(img)
    # Save it :
    imsave(filename, img)

    # Restore the canvas's previous size :
    canvas._backend._physical_size = backup_size
    canvas.size = backup_size
    canvas.bgcolor = backup_bgcolor
Example #14
 def on_draw(event):
     gloo.clear((1, 1, 1, 1))
     program.draw(gl.GL_TRIANGLE_STRIP)
     im = gloo.util._screenshot((0, 0, c.size[0], c.size[1]))
     imsave(outputFile, im)
     c.close()
Example #15
def train_resnet18_func():

    # Parameters
    params = {'batch_size': 1,
              'shuffle': True,
              'num_workers': 0}
    max_epochs = 100


    # CUDA for PyTorch
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda:0" if use_cuda else "cpu")
    torch.backends.cudnn.benchmark = True

    # dictionary - image_ids and corresponding labels
    input = open('AttentionBased/labels.pkl', 'rb')
    labels = cPickle.load(input)
    input.close()

    # image ids for training
    input = open('AttentionBased/list_train_ids.pkl', 'rb')
    list_train_ids = cPickle.load(input)
    input.close()

    # image ids for validation
    input = open('AttentionBased/list_val_ids.pkl', 'rb')
    list_val_ids = cPickle.load(input)
    input.close()

    partition = {'train': list_train_ids, 'validation': list_val_ids}


    # Generators
    training_set = Dataset('train', partition['train'], labels)
    training_generator = data.dataloader.DataLoader(training_set, **params)

    validation_set = Dataset('val', partition['validation'], labels)
    validation_generator = data.dataloader.DataLoader(validation_set, **params)


    # creating the model - loading pretrained resnet18
    net = torch.load('AttentionBased/pretrained_resnet18.pth')
    cntr = 0
    lt = 10
    for child in net.children():
        cntr += 1
        if cntr < lt:
            for param in child.parameters():
                param.requires_grad = False

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.0000001, momentum=0.9)

    
    for epoch in range(max_epochs):
        train_log = open("AttentionBased/logs/train_log.txt", "a")
        val_log = open("AttentionBased/logs/val_log.txt", "a")
        train_log_str = ""
        val_log_str = ""
        # Training
        net.train()

        i = 0
        running_loss = 0.0
        running_loss_val = 0.0
        for local_batch, local_labels in training_generator:

            i = i + 1
            img_id = local_batch['img_id']

            time1 = time.time()

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            x1 = np.asarray(local_batch['img_input']).astype(float)
            x1 = torch.Tensor(x1)

            x2 = local_batch['dim_input']

            y_tensor = torch.tensor(local_labels, dtype=torch.long, device=device)

            att_map, outputs = net(x1, x2)
            
            
            # saving attention maps
            for f in range(64):
                ar = att_map[f, :, :]
                ar = ar.detach().numpy()
                ar = scipy.ndimage.zoom(ar, 16, order=0)
                filename = ('AttentionBased/maps/' + str(img_id) + '_attention_map_'
                            + str(f) + '_epoch_' + str(epoch) + '.png')
                imsave(filename, ar)

            loss = criterion(outputs, y_tensor)
            loss.backward()
            optimizer.step()

            # saving statistics
            running_loss += loss.item()
            time2 = time.time()
            log_str = ('**************************training statistics: '
                       '[%d, %5d, time: %.5f] loss: %.3f \n'
                       % (epoch + 1, i, time2 - time1, running_loss / i))
            print(log_str)
            train_log_str += log_str

        # Validation
        net.eval()
        j = 0
        running_loss_val = 0.0
        with torch.set_grad_enabled(False):
            for local_batch_val, local_labels_val in validation_generator:

                j = j + 1

                x1_val = np.asarray(local_batch_val['img_input']).astype(float)
                x1_val = torch.Tensor(x1_val)

                x2_val = local_batch_val['dim_input']

                y_tensor_val = torch.tensor(local_labels_val, dtype=torch.long,
                                            device=device)

                atten_map, outputs_val = net(x1_val, x2_val)
                loss_val = criterion(outputs_val, y_tensor_val)

                # saving statistics
                running_loss_val += loss_val.item()
                log_str = ('***************************validation statistics: '
                           '[%d, %5d] loss: %.3f \n'
                           % (epoch + 1, j, running_loss_val / j))
                print(log_str)
                val_log_str += log_str
        
        torch.save(net, 'AttentionBased/models/resnet18_weights_epoch_'+str(epoch)+'.pth')
        
        train_log.write(train_log_str)
        val_log.write(val_log_str)
        train_log.close()
        val_log.close()
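The layer-freezing loop in this example is generic enough to isolate; a minimal sketch assuming a torchvision ResNet-18 as a stand-in for the loaded model (the cutoff value of 10 mirrors the code above; the helper name is illustrative):

from torchvision.models import resnet18

def freeze_first_children(model, cutoff=10):
    """Disable gradients for every child module whose position is below `cutoff`."""
    for i, child in enumerate(model.children(), start=1):
        if i < cutoff:
            for param in child.parameters():
                param.requires_grad = False
    return model

net = freeze_first_children(resnet18())  # untrained weights, just for illustration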
Example #16
    def _images_screenshots(self):
        # Prepare
        import imp
        from vispy.io import imsave
        from vispy.gloo.util import _screenshot
        import numpy as np
        examples_dir = op.join(ROOT_DIR, 'examples')
        gallery_dir = op.join(IMAGES_DIR, 'gallery')

        # Process all files ...
        for filename, name in get_example_filenames(examples_dir):
            name = name.replace('/', '__')  # We use flat names
            imagefilename = op.join(gallery_dir, name + '.png')

            # Check if we need to take a screenshot
            if op.isfile(imagefilename):
                print('Skip:   %s screenshot already present.' % name)
                continue

            # Check if we should make a screenshot
            frames = []
            lines = open(filename, 'rt').read().splitlines()
            for line in lines[:10]:
                if line.startswith('# vispy:') and 'gallery' in line:
                    # Get what frames to grab
                    frames = line.split('gallery')[1].split(',')[0].strip()
                    frames = frames or '0'
                    frames = [int(i) for i in frames.split(':')]
                    if not frames:
                        frames = [0]
                    if len(frames) > 1:
                        frames = list(range(*frames))
                    break
            else:
                print('Ignore: %s, no hint' % name)
                continue  # gallery hint not found

            # Import module and prepare
            print('Grab:   %s screenshots (%s)' % (name, len(frames)))
            try:
                m = imp.load_source('vispy_example_' + name, filename)
            except Exception as exp:
                print('*Err*:  %s, got "%s"' % (name, str(exp)))
                continue
            m.done = False
            m.frame = -1
            m.images = []

            # Create a canvas and grab a screenshot
            def grabscreenshot(event):
                if m.done:
                    return  # Grab only once
                m.frame += 1
                if m.frame in frames:
                    frames.remove(m.frame)
                    im = _screenshot((0, 0, c.size[0], c.size[1]))
                    # Ensure we don't have alpha silliness
                    im = np.array(im)
                    im[:, :, 3] = 255
                    m.images.append(im)
                if not frames:
                    m.done = True

            # Get canvas
            if hasattr(m, 'canvas'):
                c = m.canvas  # scene examples
            elif hasattr(m, 'Canvas'):
                c = m.Canvas()
            else:
                print('Ignore: %s, no canvas' % name)
                continue
            c.events.draw.connect(grabscreenshot)
            # Show it and draw as many frames as needed
            with c:
                n = 0
                limit = 10000
                while not m.done and n < limit:
                    c.update()
                    c.app.process_events()
                    n += 1
                if n >= limit or len(frames) > 0:
                    raise RuntimeError('Could not collect image for %s' % name)
            # Save
            imsave(imagefilename, m.images[0])  # Always save one image
            if len(m.images) > 1:
                import imageio  # multiple gif not properly supported yet
                imageio.mimsave(imagefilename[:-3] + '.gif', m.images)
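The `imp.load_source()` call used in these gallery builders is deprecated on Python 3; an equivalent dynamic import with importlib looks roughly like this (the wrapper name mirrors imp.load_source and is arbitrary):

import importlib.util

def load_source(module_name, filename):
    """Python 3 replacement for the deprecated imp.load_source()."""
    spec = importlib.util.spec_from_file_location(module_name, filename)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module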
Example #17
def on_draw(event):
    gloo.clear((1, 1, 1, 1))
    program.draw(gl.GL_TRIANGLE_STRIP)
    im = gloo.util._screenshot((0, 0, c.size[0], c.size[1]))
    imsave(outputFile, im)
    c.close()
Example #18
 def save_frame(self, filename, frame_index=0):
     imsave(filename, self._collected_images[frame_index])
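When several frames are collected, as in the screenshot builders above, imageio can bundle them into an animated GIF next to the single-frame PNG; a minimal sketch with synthetic frames (filenames are arbitrary):

import imageio
import numpy as np
from vispy.io import imsave

frames = [np.full((64, 64, 3), v, dtype=np.uint8) for v in (0, 128, 255)]
imsave('frame0.png', frames[0])        # still image of the first frame
imageio.mimsave('frames.gif', frames)  # all frames as an animated GIF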