Example no. 1
class FuncPlotter:
    def __init__(self, DOM, trace_image=None, width_range=None):
        """Draws the graphics functions.

        Parameters:
        trace_image    - a string specifying the name of the image file to trace
                        or QImage object
        width_range    - a list of two values defining the range of thickness in the trace line
        """
        self.document = DOM
        self.trace_image = trace_image
        self.width_range = width_range
        self.img = None

        if trace_image is None:
            trace_image = DOM.image

        if isinstance(trace_image, QImage):
            self.img = trace_image
        elif trace_image and os.path.exists(trace_image):
            # the traced image must be reduced to 256 indexed colours
            # ordered by brightness (grayscale)
            from .image import grayscale
            self.img = QImage(trace_image)
            self.img = grayscale(self.img)

        if self.img:
            self.img_w, self.img_h = self.img.width(), self.img.height()
            self.img_colors = float(self.img.colorCount() - 1)
            self.img_pixelIndex = self.img.pixelIndex

    def _trace_image(self, path, width_range):
        """Changes the coordinates (coords) curve as a function of the image.

        Here we take two neighboring points and calculate the angle (alpha), under which there is a line,
        perpendicular to the tangent to the curve. With this angle we hold
        connecting the ends of two parallel curves (right and left) to replace the old (coords),
        separated by a distance that depends on the index of the image.
        """

        delta = (width_range[1] - width_range[0]) / 2.0
        min_width = width_range[0] / 2.0
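        # Worked example (assumed values): with width_range = [1, 5] and a
        # 256-colour indexed image (img_colors = 255), delta = 2.0 and
        # min_width = 0.5; a node over a pixel with palette index 0 gets
        # k = 1 - 0/255 = 1 and d = (0.5 + 1*2.0) * scale = 2.5 * scale,
        # while index 255 gives k = 0 and d = 0.5 * scale.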

        # OPTIMIZATION
        img_pixelIndex = self.img_pixelIndex
        img_w, img_h = self.img_w, self.img_h
        img_colors = self.img_colors
        scale = self.document.scale

        canvas_x1 = self.document.x1
        canvas_y1 = self.document.y1
        canvas_dx = self.document.dx / img_w
        canvas_dy = self.document.dy / img_h

        for i in range(len(path.node)):
            x, y = path.node[i].x, path.node[i].y
            pixel_x = int((x - canvas_x1) / canvas_dx)
            pixel_y = int((y - canvas_y1) / canvas_dy)
            if 0 <= pixel_x < img_w and 0 <= pixel_y < img_h:
                k = 1 - img_pixelIndex(pixel_x, pixel_y) / img_colors
                d = (min_width + k * delta) * scale
                path.node[i].d = d
            #else:
            #    k = 0
            #d = (min_width + k * delta) * scale
            #path.node[i].d = d
        return path

    def auto_resolution(self, fX, fY, T):
        p = makePathData(fX, fY, T, res=0.25 / self.document.scale)
        _, _, w, h = p.boundingRect()
        return max(w, h) * 0.5

    def auto_resolution2(self, fX, fY, T):
        p = makePathData(fX, fY, T, res=0.25 / self.document.scale)
        l = p.length() if p else 0
        return l / (T[1] - T[0])

    def append_func(self,
                    fX,
                    fY,
                    T,
                    res=1,
                    color='black',
                    width=3,
                    close_path=False):
        """Adds a graph of the functions fX (t) and fY (t).

        fX            - a function of one variable, calculates the coordinates of x or
                        function of the radius of the corner, if the following option (fY) returns False
        fY            - function of one variable that calculates the y-coordinate, or None
        T            - a list of two values ​​defining the range of the variable
        color        - a string that specifies the stroke color of the curve, you can use named values
                        of the SVG specification, or 'none'
        width        - the thickness of the circuit
        close_path    - parameter that indicates whether or not to close the curve
        """

        # Step 1. Make Path
        pathData = makePathData(fX, fY, T, res / self.document.scale,
                                close_path)
        if not pathData:
            return False

        # Step 2. If there is an image, trace it
        if self.img:
            pathData = self._trace_image(pathData, self.width_range)
            # Code that splits the path into separate parts should be added here.

        # Convert the line to a polygon
        path = split(pathData)

        # Step 3. Append Path to data
        if len(path[0]) > 0:
            self.document.data.append(path)
            return True
        else:
            return False
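
A minimal usage sketch for the class above (assumptions: `doc` is a document/DOM object exposing x1, y1, dx, dy, scale, image and a `data` list, as used by the methods; the import path and the circle parameters are hypothetical):

import math

from funcplotter import FuncPlotter  # hypothetical import path

plotter = FuncPlotter(doc)  # `doc` is the assumed document object
# parametric circle of radius 50: x(t) = 50*cos(t), y(t) = 50*sin(t)
plotter.append_func(lambda t: 50 * math.cos(t),
                    lambda t: 50 * math.sin(t),
                    [0, 2 * math.pi],
                    res=1,
                    color='black',
                    width=3,
                    close_path=True)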
Example no. 2
    def readBmp(self, file, len=None, off=0, silent=False, rotate=True):
        """ Reads DOC-standard bat recordings in 8x row-compressed BMP format.
            For similarity with readWav, accepts len and off args, in seconds.
            rotate: if True, rotates to match setImage and other spectrograms (rows=time)
                otherwise preserves normal orientation (cols=time)
        """
        # !! Important to set these, as they are used in other functions
        self.sampleRate = 176000
        self.incr = 512

        img = QImage(file, "BMP")
        h = img.height()
        w = img.width()
        colc = img.colorCount()
        if h == 0 or w == 0:
            print("ERROR: image was not loaded")
            return (1)

        # Check color format and convert to grayscale
        if not silent and (not img.allGray() or colc > 256):
            print("Warning: image provided is not 8-bit grayscale, information will be lost")
        img.convertTo(QImage.Format_Grayscale8)

        # Convert to numpy
        # (remember that pyqtgraph images are column-major)
        ptr = img.constBits()
        ptr.setsize(h * w * 1)
        img2 = np.array(ptr).reshape(h, w)
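        # (constBits() returns a sip.voidptr in PyQt5; setsize() gives it an
        # explicit length so numpy can wrap the buffer, and each
        # Format_Grayscale8 pixel is one byte, hence h * w * 1.)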

        # Determine if original image was rotated, based on expected num of freq bins and freq 0 being empty
        # We also used to check if np.median(img2[-1,:])==0,
        # but some files happen to have the bottom freq bin around 90, so we cannot rely on that.
        if h == 64:
            # standard DoC format
            pass
        elif w == 64:
            # seems like DoC format, rotated by -90°
            img2 = np.rot90(img2, 1, (1, 0))
            w, h = h, w
        else:
            print("ERROR: image does not appear to be in DoC format!")
            print("Format details:")
            print(img2)
            print(h, w)
            print(min(img2[-1, :]), max(img2[-1, :]))
            print(np.sum(img2[-1, :] > 0))
            print(np.median(img2[-1, :]))
            return (1)

        # Could skip that for visual mode - maybe useful for establishing contrast?
        img2[-1, :] = 254  # lowest freq bin is 0, flip that
        img2 = 255 - img2  # invert values so that black is the most intense
        img2 = img2 / np.max(img2)  # normalization
        img2 = img2[:, 1:]  # drop the first time bin, which only contains the scale
        # repeat each freq bin 8 times to fit invertspectrogram
        img2 = np.repeat(img2, 8, axis=0)

        self.data = []
        self.fileLength = (w - 2) * self.incr + self.window_width  # in samples
        # Alternatively:
        # self.fileLength = self.convertSpectoAmpl(h-1)*self.sampleRate

        # NOTE: conversions will use self.sampleRate and self.incr, so ensure those are already set!
        # trim to specified offset and length:
        if off > 0 or len is not None:
            # Convert offset from seconds to pixels
            off = int(self.convertAmpltoSpec(off))
            if len is None:
                img2 = img2[:, off:]
            else:
                # Convert length from seconds to pixels:
                len = int(self.convertAmpltoSpec(len))
                img2 = img2[:, off:(off + len)]
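        # Worked example of the trimming above (assuming convertAmpltoSpec maps
        # seconds to columns roughly as t * sampleRate / incr): with
        # sampleRate = 176000 and incr = 512, one column covers
        # 512 / 176000 ≈ 2.9 ms, so off = 1 s is about 344 columns.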

        if rotate:
            # rotate for display, b/c required spectrogram dimensions are:
            #  t increasing over rows, f increasing over cols
            # This will be enough if the original image was spectrogram-shape.
            img2 = np.rot90(img2, 1, (1, 0))

        self.sg = img2

        if QtMM:
            self.audioFormat.setChannelCount(0)
            self.audioFormat.setSampleSize(0)
            self.audioFormat.setSampleRate(self.sampleRate)
        #else:
        #    self.audioFormat['channelCount'] = 0
        #    self.audioFormat['sampleSize'] = 0
        #    self.audioFormat['sampleRate'] = self.sampleRate

        self.minFreq = 0
        self.maxFreq = self.sampleRate // 2
        self.minFreqShow = max(self.minFreq, self.minFreqShow)
        self.maxFreqShow = min(self.maxFreq, self.maxFreqShow)

        if not silent:
            print("Detected BMP format: %d x %d px, %d colours" % (w, h, colc))
        return (0)
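
A minimal usage sketch for readBmp (assumptions: `sp` is an instance of the class that defines this method, with window_width, minFreqShow, maxFreqShow and, when QtMM is set, audioFormat already initialised; the file name is made up):

status = sp.readBmp("bat_recording.bmp", len=5.0, off=1.0, rotate=True)
if status == 0:
    print(sp.sampleRate)  # 176000, fixed by the DOC BMP format
    print(sp.sg.shape)    # spectrogram array; rows = time when rotate=True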