Example 1
    def draw_image(self, viewer, dstarr, whence=0.0):
        if self.image is None:
            return

        cache = self.get_cache(viewer)

        dst_order = viewer.get_rgb_order()
        image_order = self.image.get_order()

        if (whence <= 0.0) or (cache.cutout is None) or (not self.optimize):
            # get extent of our data coverage in the window
            pts = np.asarray(viewer.get_pan_rect()).T
            xmin = int(np.min(pts[0]))
            ymin = int(np.min(pts[1]))
            xmax = int(np.ceil(np.max(pts[0])))
            ymax = int(np.ceil(np.max(pts[1])))

            # get destination location in data_coords
            dst_x, dst_y = self.crdmap.to_data((self.x, self.y))

            a1, b1, a2, b2 = 0, 0, self.image.width - 1, self.image.height - 1

            # calculate the cutout that we can make and scale to merge
            # onto the final image; by only cutting out what is necessary,
            # this greatly speeds scaling at zoomed-in sizes
            ((dst_x, dst_y), (a1, b1), (a2, b2)) = \
                trcalc.calc_image_merge_clip((xmin, ymin), (xmax, ymax),
                                             (dst_x, dst_y),
                                             (a1, b1), (a2, b2))

            # is image completely off the screen?
            if (a2 - a1 <= 0) or (b2 - b1 <= 0):
                # no overlay needed
                return

            # cutout and scale the piece appropriately by the viewer scale
            scale_x, scale_y = viewer.get_scale_xy()
            # scale additionally by our scale
            _scale_x, _scale_y = scale_x * self.scale_x, scale_y * self.scale_y

            res = self.image.get_scaled_cutout2((a1, b1), (a2, b2),
                                                (_scale_x, _scale_y),
                                                method=self.interpolation)

            # don't ask for an alpha channel from overlaid image if it
            # doesn't have one
            ## if ('A' in dst_order) and not ('A' in image_order):
            ##     dst_order = dst_order.replace('A', '')

            ## if dst_order != image_order:
            ##     # reorder result to match desired rgb_order by backend
            ##     cache.cutout = trcalc.reorder_image(dst_order, res.data,
            ##                                          image_order)
            ## else:
            ##     cache.cutout = res.data
            data = res.data
            if self.flipy:
                data = np.flipud(data)
            cache.cutout = data

            # calculate our offset from the pan position
            pan_x, pan_y = viewer.get_pan()
            pan_off = viewer.data_off
            pan_x, pan_y = pan_x + pan_off, pan_y + pan_off
            off_x, off_y = dst_x - pan_x, dst_y - pan_y
            # scale offset
            off_x *= scale_x
            off_y *= scale_y

            # dst position in the pre-transformed array should be calculated
            # from the center of the array plus offsets
            ht, wd, dp = dstarr.shape
            cvs_x = int(np.round(wd / 2.0 + off_x))
            cvs_y = int(np.round(ht / 2.0 + off_y))
            cache.cvs_pos = (cvs_x, cvs_y)

        # composite the image into the destination array at the
        # calculated position
        trcalc.overlay_image(dstarr, cache.cvs_pos, cache.cutout,
                             dst_order=dst_order, src_order=image_order,
                             alpha=self.alpha, fill=True, flipy=False)
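
For orientation, the clipping step above (trcalc.calc_image_merge_clip) can be pictured as intersecting the overlay's footprint with the visible data rectangle. The sketch below is a standalone illustration of that idea with made-up names, not the actual trcalc routine:

    def clip_to_view(view_lo, view_hi, dst_pos, src_lo, src_hi):
        # Shift the cutout start (a1, b1) and the paste position when the
        # overlay begins left of / below the visible rectangle, and shrink
        # the cutout end (a2, b2) when it runs past the far edges.
        (xmin, ymin), (xmax, ymax) = view_lo, view_hi
        dst_x, dst_y = dst_pos
        a1, b1 = src_lo
        a2, b2 = src_hi
        if dst_x < xmin:
            a1 += xmin - dst_x
            dst_x = xmin
        if dst_y < ymin:
            b1 += ymin - dst_y
            dst_y = ymin
        a2 = min(a2, a1 + (xmax - dst_x))
        b2 = min(b2, b1 + (ymax - dst_y))
        # if a2 - a1 <= 0 or b2 - b1 <= 0 here, the overlay is entirely
        # outside the window, which is exactly what the code above checks
        return (dst_x, dst_y), (a1, b1), (a2, b2)
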
Example 2
    def draw_image(self, viewer, dstarr, whence=0.0):
        if self.image is None:
            return

        cache = self.get_cache(viewer)

        if (whence <= 0.0) or (cache.cutout is None) or (not self.optimize):
            # get extent of our data coverage in the window
            pts = np.asarray(viewer.get_pan_rect()).T
            xmin = int(np.min(pts[0]))
            ymin = int(np.min(pts[1]))
            xmax = int(np.ceil(np.max(pts[0])))
            ymax = int(np.ceil(np.max(pts[1])))

            # destination location in data_coords
            dst_x, dst_y = self.crdmap.to_data((self.x, self.y))

            a1, b1, a2, b2 = 0, 0, self.image.width - 1, self.image.height - 1

            # calculate the cutout that we can make and scale to merge
            # onto the final image; by only cutting out what is necessary,
            # this greatly speeds scaling at zoomed-in sizes
            ((dst_x, dst_y), (a1, b1), (a2, b2)) = \
                trcalc.calc_image_merge_clip((xmin, ymin), (xmax, ymax),
                                             (dst_x, dst_y),
                                             (a1, b1), (a2, b2))

            # is image completely off the screen?
            if (a2 - a1 <= 0) or (b2 - b1 <= 0):
                # no overlay needed
                return

            # cutout and scale the piece appropriately by viewer scale
            scale_x, scale_y = viewer.get_scale_xy()
            # scale additionally by our scale
            _scale_x, _scale_y = scale_x * self.scale_x, scale_y * self.scale_y

            res = self.image.get_scaled_cutout2((a1, b1), (a2, b2),
                                                (_scale_x, _scale_y),
                                                method=self.interpolation)
            cache.cutout = res.data

            # calculate our offset from the pan position
            pan_x, pan_y = viewer.get_pan()
            pan_off = viewer.data_off
            pan_x, pan_y = pan_x + pan_off, pan_y + pan_off
            off_x, off_y = dst_x - pan_x, dst_y - pan_y
            # scale offset
            off_x *= scale_x
            off_y *= scale_y

            # dst position in the pre-transformed array should be calculated
            # from the center of the array plus offsets
            ht, wd, dp = dstarr.shape
            cvs_x = int(np.round(wd / 2.0 + off_x))
            cvs_y = int(np.round(ht / 2.0 + off_y))
            cache.cvs_pos = (cvs_x, cvs_y)

        if self.rgbmap is not None:
            rgbmap = self.rgbmap
        else:
            rgbmap = viewer.get_rgbmap()

        if (whence <= 1.0) or (cache.prergb is None) or (not self.optimize):
            # apply visual changes prior to color mapping (cut levels, etc)
            vmax = rgbmap.get_hash_size() - 1
            newdata = self.apply_visuals(viewer, cache.cutout, 0, vmax)

            # result becomes an index array fed to the RGB mapper
            if not np.issubdtype(newdata.dtype, np.dtype('uint')):
                newdata = newdata.astype(np.uint)
            idx = newdata

            self.logger.debug("shape of index is %s" % (str(idx.shape)))
            cache.prergb = idx

        dst_order = viewer.get_rgb_order()
        image_order = self.image.get_order()
        get_order = dst_order
        # note: is this still needed?  I think overlay_image will handle
        # a mismatch of alpha channel now
        if ('A' in dst_order) and not ('A' in image_order):
            get_order = dst_order.replace('A', '')

        if (whence <= 2.5) or (cache.rgbarr is None) or (not self.optimize):
            # get RGB mapped array
            rgbobj = rgbmap.get_rgbarray(cache.prergb, order=dst_order,
                                         image_order=image_order)
            cache.rgbarr = rgbobj.get_array(get_order)

        # composite the image into the destination array at the
        # calculated position
        trcalc.overlay_image(dstarr, cache.cvs_pos, cache.rgbarr,
                             dst_order=dst_order, src_order=get_order,
                             alpha=self.alpha, fill=True, flipy=False)
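
The three whence tests in this variant implement a staged render cache: lower whence values invalidate earlier stages, and each stage is recomputed only when requested, missing, or when optimization is off. A minimal sketch of the pattern (illustrative names, not the Ginga API):

    class StageCache:
        def __init__(self):
            self.cutout = None   # whence <= 0.0: scaled cutout of the source
            self.prergb = None   # whence <= 1.0: index array after visuals
            self.rgbarr = None   # whence <= 2.5: color-mapped RGB(A) array

    def render(cache, whence, make_cutout, make_index, make_rgb, optimize=True):
        # each stage reruns if explicitly requested, not yet cached, or
        # when caching is disabled
        if whence <= 0.0 or cache.cutout is None or not optimize:
            cache.cutout = make_cutout()
        if whence <= 1.0 or cache.prergb is None or not optimize:
            cache.prergb = make_index(cache.cutout)
        if whence <= 2.5 or cache.rgbarr is None or not optimize:
            cache.rgbarr = make_rgb(cache.prergb)
        return cache.rgbarr
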
Example 3
    def draw_image(self, dstarr):
        #print "drawing image at %f,%f" % (self.x, self.y)

        # get extent of our data coverage in the window
        ((x0, y0), (x1, y1), (x2, y2), (x3, y3)) = self.fitsimage.get_pan_rect()
        xmin = int(min(x0, x1, x2, x3))
        ymin = int(min(y0, y1, y2, y3))
        xmax = int(max(x0, x1, x2, x3))
        ymax = int(max(y0, y1, y2, y3))

        # destination location in data_coords
        #dst_x, dst_y = self.x, self.y + ht
        dst_x, dst_y = self.x, self.y
        #print "actual placement at %d,%d" % (dst_x, dst_y)
        
        a1, b1, a2, b2 = 0, 0, self.image.width, self.image.height

        # calculate the cutout that we can make and scale to merge
        # onto the final image; by only cutting out what is necessary,
        # this greatly speeds scaling at zoomed-in sizes
        dst_x, dst_y, a1, b1, a2, b2 = \
               trcalc.calc_image_merge_clip(xmin, ymin, xmax, ymax,
                                            dst_x, dst_y, a1, b1, a2, b2)
        #a1, b1, a2, b2 = 0, 0, self.image.width, self.image.height
        #print "a1,b1=%d,%d a2,b2=%d,%d" % (a1, b1, a2, b2)

        # is image completely off the screen?
        if (a2 - a1 <= 0) or (b2 - b1 <= 0):
            # no overlay needed
            #print "no overlay needed"
            return

        # scale the cutout according to the current viewer scale
        order = self.fitsimage.get_rgb_order()
        ## if 'A' in order:
        ##     order = order.replace('A', '')
        srcdata = self.image.get_array(order)
        #print "order=%s srcdata=%s" % (order, srcdata.shape)
        scale_x, scale_y = self.fitsimage.get_scale_xy()
        (newdata, (nscale_x, nscale_y)) = \
                  trcalc.get_scaled_cutout_basic(srcdata, a1, b1, a2, b2,
                                                 scale_x, scale_y)
        
        # calculate our offset from the pan position
        pan_x, pan_y = self.fitsimage.get_pan()
        #print "pan x,y=%f,%f" % (pan_x, pan_y)
        off_x, off_y = dst_x - pan_x, dst_y - pan_y
        # scale offset
        off_x *= scale_x
        off_y *= scale_y
        #print "off_x,y=%f,%f" % (off_x, off_y)

        # dst position in the pre-transformed array should be calculated
        # from the center of the array plus offsets
        ht, wd, dp = dstarr.shape
        x = int(round(wd / 2.0  + off_x))
        y = int(round(ht / 2.0  + off_y))

        # composite the image into the destination array at the
        # calculated position
        trcalc.overlay_image(dstarr, x, y, newdata, alpha=self.alpha,
                             flipy=self.flipy)
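
The placement arithmetic is the same in every variant: the destination array is centered on the pan position, so the paste point is the array center plus the overlay's data-space offset from the pan point, scaled to screen pixels. A self-contained sketch (canvas_position is a hypothetical helper, not part of the library):

    import numpy as np

    def canvas_position(dst_shape, pan_xy, dst_xy, scale_xy):
        # dst_shape is (ht, wd[, dp]) of the destination array
        ht, wd = dst_shape[:2]
        off_x = (dst_xy[0] - pan_xy[0]) * scale_xy[0]
        off_y = (dst_xy[1] - pan_xy[1]) * scale_xy[1]
        return (int(np.round(wd / 2.0 + off_x)),
                int(np.round(ht / 2.0 + off_y)))
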
Example 4
    def draw_image(self, viewer, dstarr, whence=0.0):
        if self.image is None:
            return

        #print("redraw whence=%f" % (whence))
        cache = self.get_cache(viewer)

        if (whence <= 0.0) or (cache.cutout is None) or (not self.optimize):
            # get extent of our data coverage in the window
            ((x0, y0), (x1, y1), (x2, y2), (x3, y3)) = viewer.get_pan_rect()
            xmin = int(min(x0, x1, x2, x3))
            ymin = int(min(y0, y1, y2, y3))
            xmax = int(max(x0, x1, x2, x3))
            ymax = int(max(y0, y1, y2, y3))

            # destination location in data_coords
            dst_x, dst_y = self.x, self.y

            a1, b1, a2, b2 = 0, 0, self.image.width, self.image.height

            # calculate the cutout that we can make and scale to merge
            # onto the final image; by only cutting out what is necessary,
            # this greatly speeds scaling at zoomed-in sizes
            dst_x, dst_y, a1, b1, a2, b2 = \
                   trcalc.calc_image_merge_clip(xmin, ymin, xmax, ymax,
                                                dst_x, dst_y, a1, b1, a2, b2)

            # is image completely off the screen?
            if (a2 - a1 <= 0) or (b2 - b1 <= 0):
                # no overlay needed
                #print "no overlay needed"
                return

            # cutout and scale the piece appropriately by viewer scale
            scale_x, scale_y = viewer.get_scale_xy()
            # scale additionally by our scale
            _scale_x, _scale_y = scale_x * self.scale_x, scale_y * self.scale_y

            res = self.image.get_scaled_cutout(a1, b1, a2, b2,
                                               _scale_x, _scale_y,
                                               method=self.interpolation)
            cache.cutout = res.data

            # calculate our offset from the pan position
            pan_x, pan_y = viewer.get_pan()
            pan_off = viewer.data_off
            pan_x, pan_y = pan_x + pan_off, pan_y + pan_off
            #print "pan x,y=%f,%f" % (pan_x, pan_y)
            off_x, off_y = dst_x - pan_x, dst_y - pan_y
            # scale offset
            off_x *= scale_x
            off_y *= scale_y
            #print "off_x,y=%f,%f" % (off_x, off_y)

            # dst position in the pre-transformed array should be calculated
            # from the center of the array plus offsets
            ht, wd, dp = dstarr.shape
            cache.cvs_x = int(round(wd / 2.0  + off_x))
            cache.cvs_y = int(round(ht / 2.0  + off_y))

        if self.rgbmap is not None:
            rgbmap = self.rgbmap
        else:
            rgbmap = viewer.get_rgbmap()

        if (whence <= 1.0) or (cache.prergb is None) or (not self.optimize):
            # apply visual changes prior to color mapping (cut levels, etc)
            vmax = rgbmap.get_hash_size() - 1
            newdata = self.apply_visuals(viewer, cache.cutout, 0, vmax)

            # result becomes an index array fed to the RGB mapper
            if not numpy.issubdtype(newdata.dtype, numpy.dtype('uint')):
                newdata = newdata.astype(numpy.uint)
            idx = newdata

            self.logger.debug("shape of index is %s" % (str(idx.shape)))
            cache.prergb = idx

        dst_order = viewer.get_rgb_order()
        image_order = self.image.get_order()
        get_order = dst_order
        if ('A' in dst_order) and not ('A' in image_order):
            get_order = dst_order.replace('A', '')

        if (whence <= 2.5) or (cache.rgbarr is None) or (not self.optimize):
            # get RGB mapped array
            rgbobj = rgbmap.get_rgbarray(cache.prergb, order=dst_order,
                                         image_order=image_order)
            cache.rgbarr = rgbobj.get_array(get_order)

        # composite the image into the destination array at the
        # calculated position
        trcalc.overlay_image(dstarr, cache.cvs_x, cache.cvs_y, cache.rgbarr,
                             dst_order=dst_order, src_order=get_order,
                             alpha=self.alpha, flipy=False)
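
The whence <= 1.0 stage turns the cutout into an unsigned-integer index array whose values address the RGB map's lookup table (0 through get_hash_size() - 1). The rough standalone sketch below illustrates the idea; to_index is a hypothetical stand-in for apply_visuals plus the cast, not the library code:

    import numpy as np

    def to_index(data, hash_size=256):
        # scale data into integer LUT indices 0 .. hash_size - 1
        lo, hi = float(np.nanmin(data)), float(np.nanmax(data))
        if hi <= lo:
            return np.zeros(data.shape, dtype=np.uint32)
        idx = (data - lo) * ((hash_size - 1) / (hi - lo))
        return idx.clip(0, hash_size - 1).astype(np.uint32)

    # usage: rgb = lut[to_index(cutout)]   # lut shaped (hash_size, 3)
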
Example 5
    def draw_image(self, viewer, dstarr, whence=0.0):
        if self.image is None:
            return

        cache = self.get_cache(viewer)

        #print("redraw whence=%f" % (whence))
        dst_order = viewer.get_rgb_order()
        image_order = self.image.get_order()

        if (whence <= 0.0) or (cache.cutout is None) or (not self.optimize):
            # get extent of our data coverage in the window
            ((x0, y0), (x1, y1), (x2, y2), (x3, y3)) = viewer.get_pan_rect()
            xmin = int(min(x0, x1, x2, x3))
            ymin = int(min(y0, y1, y2, y3))
            xmax = int(max(x0, x1, x2, x3))
            ymax = int(max(y0, y1, y2, y3))

            # destination location in data_coords
            #dst_x, dst_y = self.x, self.y + ht
            dst_x, dst_y = self.x, self.y

            a1, b1, a2, b2 = 0, 0, self.image.width, self.image.height

            # calculate the cutout that we can make and scale to merge
            # onto the final image; by only cutting out what is necessary,
            # this greatly speeds scaling at zoomed-in sizes
            dst_x, dst_y, a1, b1, a2, b2 = \
                   trcalc.calc_image_merge_clip(xmin, ymin, xmax, ymax,
                                                dst_x, dst_y, a1, b1, a2, b2)

            # is image completely off the screen?
            if (a2 - a1 <= 0) or (b2 - b1 <= 0):
                # no overlay needed
                #print "no overlay needed"
                return

            # cutout and scale the piece appropriately by the viewer scale
            scale_x, scale_y = viewer.get_scale_xy()
            # scale additionally by our scale
            _scale_x, _scale_y = scale_x * self.scale_x, scale_y * self.scale_y

            res = self.image.get_scaled_cutout(a1, b1, a2, b2,
                                               _scale_x, _scale_y,
                                               #flipy=self.flipy,
                                               method=self.interpolation)

            # don't ask for an alpha channel from overlaid image if it
            # doesn't have one
            ## if ('A' in dst_order) and not ('A' in image_order):
            ##     dst_order = dst_order.replace('A', '')

            ## if dst_order != image_order:
            ##     # reorder result to match desired rgb_order by backend
            ##     cache.cutout = trcalc.reorder_image(dst_order, res.data,
            ##                                          image_order)
            ## else:
            ##     cache.cutout = res.data
            cache.cutout = res.data

            # calculate our offset from the pan position
            pan_x, pan_y = viewer.get_pan()
            pan_off = viewer.data_off
            pan_x, pan_y = pan_x + pan_off, pan_y + pan_off
            #print "pan x,y=%f,%f" % (pan_x, pan_y)
            off_x, off_y = dst_x - pan_x, dst_y - pan_y
            # scale offset
            off_x *= scale_x
            off_y *= scale_y
            #print "off_x,y=%f,%f" % (off_x, off_y)

            # dst position in the pre-transformed array should be calculated
            # from the center of the array plus offsets
            ht, wd, dp = dstarr.shape
            cache.cvs_x = int(round(wd / 2.0  + off_x))
            cache.cvs_y = int(round(ht / 2.0  + off_y))

        # composite the image into the destination array at the
        # calculated position
        trcalc.overlay_image(dstarr, cache.cvs_x, cache.cvs_y, cache.cutout,
                             dst_order=dst_order, src_order=image_order,
                             alpha=self.alpha, flipy=False)
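
Note that flipping is handled differently across variants: here the flip is commented out of the scaling call and flipy=False is passed to overlay_image, whereas Example 1 flips the cached cutout once with np.flipud so the flip is not repeated on every composite. A one-line illustration of that flip:

    import numpy as np

    data = np.arange(12).reshape(3, 4)
    flipped = np.flipud(data)   # reverses the row order; columns are untouched
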
Example 6
    def draw_image(self, viewer, dstarr, whence=0.0):
        if self.image is None:
            return

        cache = self.get_cache(viewer)

        if (whence <= 0.0) or (cache.cutout is None) or (not self.optimize):
            # get extent of our data coverage in the window
            ((x0, y0), (x1, y1), (x2, y2), (x3, y3)) = viewer.get_pan_rect()
            xmin = int(min(x0, x1, x2, x3))
            ymin = int(min(y0, y1, y2, y3))
            xmax = int(numpy.ceil(max(x0, x1, x2, x3)))
            ymax = int(numpy.ceil(max(y0, y1, y2, y3)))

            # destination location in data_coords
            dst_x, dst_y = self.crdmap.to_data(self.x, self.y)

            a1, b1, a2, b2 = 0, 0, self.image.width, self.image.height

            # calculate the cutout that we can make and scale to merge
            # onto the final image; by only cutting out what is necessary,
            # this greatly speeds scaling at zoomed-in sizes
            ((dst_x, dst_y), (a1, b1), (a2, b2)) = \
                 trcalc.calc_image_merge_clip((xmin, ymin), (xmax, ymax),
                                              (dst_x, dst_y),
                                              (a1, b1), (a2, b2))

            # is image completely off the screen?
            if (a2 - a1 <= 0) or (b2 - b1 <= 0):
                # no overlay needed
                return

            # cutout and scale the piece appropriately by viewer scale
            scale_x, scale_y = viewer.get_scale_xy()
            # scale additionally by our scale
            _scale_x, _scale_y = scale_x * self.scale_x, scale_y * self.scale_y

            res = self.image.get_scaled_cutout2((a1, b1), (a2, b2),
                                                (_scale_x, _scale_y),
                                                method=self.interpolation)
            cache.cutout = res.data

            # calculate our offset from the pan position
            pan_x, pan_y = viewer.get_pan()
            pan_off = viewer.data_off
            pan_x, pan_y = pan_x + pan_off, pan_y + pan_off
            off_x, off_y = dst_x - pan_x, dst_y - pan_y
            # scale offset
            off_x *= scale_x
            off_y *= scale_y

            # dst position in the pre-transformed array should be calculated
            # from the center of the array plus offsets
            ht, wd, dp = dstarr.shape
            cvs_x = int(round(wd / 2.0 + off_x))
            cvs_y = int(round(ht / 2.0 + off_y))
            cache.cvs_pos = (cvs_x, cvs_y)

        if self.rgbmap is not None:
            rgbmap = self.rgbmap
        else:
            rgbmap = viewer.get_rgbmap()

        if (whence <= 1.0) or (cache.prergb is None) or (not self.optimize):
            # apply visual changes prior to color mapping (cut levels, etc)
            vmax = rgbmap.get_hash_size() - 1
            newdata = self.apply_visuals(viewer, cache.cutout, 0, vmax)

            # result becomes an index array fed to the RGB mapper
            if not numpy.issubdtype(newdata.dtype, numpy.dtype('uint')):
                newdata = newdata.astype(numpy.uint)
            idx = newdata

            self.logger.debug("shape of index is %s" % (str(idx.shape)))
            cache.prergb = idx

        dst_order = viewer.get_rgb_order()
        image_order = self.image.get_order()
        get_order = dst_order
        if ('A' in dst_order) and not ('A' in image_order):
            get_order = dst_order.replace('A', '')

        if (whence <= 2.5) or (cache.rgbarr is None) or (not self.optimize):
            # get RGB mapped array
            rgbobj = rgbmap.get_rgbarray(cache.prergb,
                                         order=dst_order,
                                         image_order=image_order)
            cache.rgbarr = rgbobj.get_array(get_order)

        # composite the image into the destination array at the
        # calculated position
        trcalc.overlay_image(dstarr,
                             cache.cvs_pos,
                             cache.rgbarr,
                             dst_order=dst_order,
                             src_order=get_order,
                             alpha=self.alpha,
                             flipy=False)
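
The get_order adjustment above avoids requesting an alpha plane that the source image cannot supply. The sketch below shows the general channel-reordering idea using plain numpy indexing; reorder is a hypothetical helper, not trcalc.reorder_image:

    import numpy as np

    def reorder(arr, src_order, dst_order):
        # rearrange the last axis from src_order to dst_order,
        # e.g. 'RGBA' -> 'BGR'; channels absent from dst_order are dropped
        idx = [src_order.index(c) for c in dst_order]
        return arr[..., idx]

    dst_order, image_order = 'RGBA', 'RGB'
    get_order = dst_order
    if 'A' in dst_order and 'A' not in image_order:
        get_order = dst_order.replace('A', '')   # -> 'RGB'
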
Example 7
    def draw_image(self, viewer, dstarr, whence=0.0):
        if self.image is None:
            return

        cache = self.get_cache(viewer)

        dst_order = viewer.get_rgb_order()
        image_order = self.image.get_order()

        if (whence <= 0.0) or (cache.cutout is None) or (not self.optimize):
            # get extent of our data coverage in the window
            ((x0, y0), (x1, y1), (x2, y2), (x3, y3)) = viewer.get_pan_rect()
            xmin = int(min(x0, x1, x2, x3))
            ymin = int(min(y0, y1, y2, y3))
            xmax = int(numpy.ceil(max(x0, x1, x2, x3)))
            ymax = int(numpy.ceil(max(y0, y1, y2, y3)))

            # destination location in data_coords
            #dst_x, dst_y = self.x, self.y + ht
            dst_x, dst_y = self.crdmap.to_data(self.x, self.y)

            a1, b1, a2, b2 = 0, 0, self.image.width, self.image.height

            # calculate the cutout that we can make and scale to merge
            # onto the final image; by only cutting out what is necessary,
            # this greatly speeds scaling at zoomed-in sizes
            ((dst_x, dst_y), (a1, b1), (a2, b2)) = \
                 trcalc.calc_image_merge_clip((xmin, ymin), (xmax, ymax),
                                              (dst_x, dst_y),
                                              (a1, b1), (a2, b2))

            # is image completely off the screen?
            if (a2 - a1 <= 0) or (b2 - b1 <= 0):
                # no overlay needed
                return

            # cutout and scale the piece appropriately by the viewer scale
            scale_x, scale_y = viewer.get_scale_xy()
            # scale additionally by our scale
            _scale_x, _scale_y = scale_x * self.scale_x, scale_y * self.scale_y

            res = self.image.get_scaled_cutout2(
                (a1, b1),
                (a2, b2),
                (_scale_x, _scale_y),
                #flipy=self.flipy,
                method=self.interpolation)

            # don't ask for an alpha channel from overlaid image if it
            # doesn't have one
            ## if ('A' in dst_order) and not ('A' in image_order):
            ##     dst_order = dst_order.replace('A', '')

            ## if dst_order != image_order:
            ##     # reorder result to match desired rgb_order by backend
            ##     cache.cutout = trcalc.reorder_image(dst_order, res.data,
            ##                                          image_order)
            ## else:
            ##     cache.cutout = res.data
            cache.cutout = res.data

            # calculate our offset from the pan position
            pan_x, pan_y = viewer.get_pan()
            pan_off = viewer.data_off
            pan_x, pan_y = pan_x + pan_off, pan_y + pan_off
            off_x, off_y = dst_x - pan_x, dst_y - pan_y
            # scale offset
            off_x *= scale_x
            off_y *= scale_y

            # dst position in the pre-transformed array should be calculated
            # from the center of the array plus offsets
            ht, wd, dp = dstarr.shape
            cvs_x = int(round(wd / 2.0 + off_x))
            cvs_y = int(round(ht / 2.0 + off_y))
            cache.cvs_pos = (cvs_x, cvs_y)

        # composite the image into the destination array at the
        # calculated position
        trcalc.overlay_image(dstarr,
                             cache.cvs_pos,
                             cache.cutout,
                             dst_order=dst_order,
                             src_order=image_order,
                             alpha=self.alpha,
                             flipy=False)
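
Finally, the composite itself: trcalc.overlay_image is the library routine, and the sketch below only illustrates the kind of clipped, alpha-blended paste it performs, assuming the cutout and destination share the same channel layout and treating the position as the corner where the cutout lands:

    import numpy as np

    def overlay(dstarr, pos, cutout, alpha=1.0):
        cvs_x, cvs_y = pos
        ht, wd = dstarr.shape[:2]
        ch, cw = cutout.shape[:2]
        # clip the paste rectangle to the destination bounds
        x0, y0 = max(cvs_x, 0), max(cvs_y, 0)
        x1, y1 = min(cvs_x + cw, wd), min(cvs_y + ch, ht)
        if x1 <= x0 or y1 <= y0:
            return   # nothing visible
        src = cutout[y0 - cvs_y:y1 - cvs_y, x0 - cvs_x:x1 - cvs_x]
        dst = dstarr[y0:y1, x0:x1]
        blended = (1.0 - alpha) * dst + alpha * src
        dstarr[y0:y1, x0:x1] = blended.astype(dstarr.dtype)
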