Example #1
    def render_cairo(self, cr, bounds, element, hscroll_pos, y1):
        if not self._view:
            return
        # The idea is to conceptually divide the clip into a sequence of
        # rectangles beginning at the start of the file, each
        # pixelsToNs(twidth) nanoseconds long. The thumbnail within a
        # rectangle is the frame produced from the timestamp corresponding to
        # the rectangle's left edge. We speed things up by only drawing the
        # rectangles which intersect the given bounds.  FIXME: how would we
        # handle timestretch?
        height = bounds.y2 - bounds.y1
        width = bounds.x2 - bounds.x1

        # We actually draw the rectangles just to the left of the clip's
        # in-point and just to the right of the clip's out-point, so we need
        # to mask off the actual bounds.
        cr.rectangle(bounds.x1, bounds.y1, width, height)
        cr.clip()

        # tdur = duration in ns of thumbnail
        # sof  = start of file in pixel coordinates
        x1 = bounds.x1
        sof = Zoomable.nsToPixel(element.start - element.in_point) +\
            hscroll_pos

        # i = left edge of the leftmost thumbnail to draw. We start with x1
        # and subtract the distance to the nearest rectangle boundary to its
        # left. Justification of the following:
        #                i = sof + k * twidth     (i lies on the tile grid)
        #                i = x1 - delta           (i is just left of x1)
        # sof + k * twidth = x1 - delta
        #       k * twidth = (x1 - sof) - delta
        #    <=>     delta = (x1 - sof) mod twidth
        # Fortunately for us, % works on floats in Python.

        i = x1 - ((x1 - sof) % (self.twidth + self._spacing()))

        # j = timestamp *within the element* of the thumbnail to be drawn.
        # We would like these timestamps to be perfectly numerically stable,
        # and although this computation is not, in practice it seems to give
        # good enough results. It might be possible to improve it further,
        # which would result in fewer thumbnails needing to be generated.
        j = Zoomable.pixelToNs(i - sof)
        istep = self.twidth + self._spacing()
        jstep = self.tdur + Zoomable.pixelToNs(self.spacing)

        while i < bounds.x2:
            # draw the thumbnail for timestamp j at x = i, filling a tile
            # that overlaps its neighbours by one pixel on either side
            self._thumbForTime(cr, j, i, y1)
            cr.rectangle(i - 1, y1, self.twidth + 2, self.theight)
            cr.fill()
            i += istep
            j += jstep
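The modular-arithmetic step in the comment above is easy to sanity-check on its own. Below is a minimal, self-contained sketch with made-up pixel values; tile_width stands in for self.twidth + self._spacing():

def first_tile_left_edge(x1, sof, tile_width):
    # delta = (x1 - sof) % tile_width is the distance from x1 back to the
    # nearest tile boundary at or to the left of x1; subtracting it snaps x1
    # onto the grid of tiles that begins at sof
    return x1 - ((x1 - sof) % tile_width)

# hypothetical numbers: the file starts at pixel 13, tiles are 64 px wide and
# the exposed area begins at pixel 200
i = first_tile_left_edge(200.0, 13.0, 64.0)
assert i <= 200.0 < i + 64.0   # the chosen tile really does contain x1
assert i == 13.0 + 2 * 64.0    # i.e. sof + k * tile_width, here with k == 2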
Example #2
 def xyToTimeValue(self, pos):
     # map a canvas (x, y) position to a (timestamp, value) pair: x becomes a
     # timestamp within the element, y is normalized and scaled into the
     # interpolator's range after removing the KW_LABEL_Y_OVERFLOW offset
     view = self._view
     interpolator = view.interpolator
     bounds = view.bounds
     time = Zoomable.pixelToNs(pos[0] - bounds.x1) + view.element.in_point
     value = (
         (1 - (pos[1] - KW_LABEL_Y_OVERFLOW - bounds.y1 - view._min) / view._range) * interpolator.range
     ) + interpolator.lower
     return time, value
Example #3
 def xyToTimeValue(self, pos):
     # same mapping as the previous example, but without the
     # KW_LABEL_Y_OVERFLOW correction on the y coordinate
     view = self._view
     interpolator = view.interpolator
     bounds = view.bounds
     time = (Zoomable.pixelToNs(pos[0] - bounds.x1) +
             view.element.in_point)
     value = ((1 - (pos[1] - bounds.y1 - view._min) / view._range) *
              interpolator.range) + interpolator.lower
     return time, value
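For reference, here is a standalone sketch of the coordinate mapping performed by the two xyToTimeValue variants above, together with a hypothetical inverse. The constants are stand-ins invented for illustration: NS_PER_PIXEL replaces Zoomable.pixelToNs, the other values replace the bounds, view and interpolator attributes, and time_value_to_xy is not taken from the source, it only demonstrates that the mapping is invertible:

NS_PER_PIXEL = 1_000_000              # assumed zoom: 1 pixel == 1 millisecond
IN_POINT = 0                          # stand-in for element.in_point
Y1, Y_MIN, Y_RANGE = 0.0, 0.0, 50.0   # stand-ins for bounds.y1, view._min, view._range
LOWER, RANGE = 0.0, 1.0               # stand-ins for interpolator.lower / .range

def xy_to_time_value(x, y, x1=0.0):
    # x maps linearly to a timestamp within the element; y is normalized to
    # [0, 1] (inverted, since canvas y grows downward) and scaled into the
    # interpolator's range
    time = int((x - x1) * NS_PER_PIXEL) + IN_POINT
    value = (1 - (y - Y1 - Y_MIN) / Y_RANGE) * RANGE + LOWER
    return time, value

def time_value_to_xy(time, value, x1=0.0):
    # hypothetical inverse, e.g. for placing a keyframe handle back on canvas
    x = (time - IN_POINT) / NS_PER_PIXEL + x1
    y = (1 - (value - LOWER) / RANGE) * Y_RANGE + Y_MIN + Y1
    return x, y

t, v = xy_to_time_value(120.0, 10.0)
x, y = time_value_to_xy(t, v)
assert abs(x - 120.0) < 1e-6 and abs(y - 10.0) < 1e-6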
Example #4
 def _selectionEnd(self, item, target, event):
     seeker = self.app.current.seeker
     self.pointer_ungrab(self.get_root_item(), event.time)
     self._selecting = False
     self._marquee.props.visibility = goocanvas.ITEM_INVISIBLE
     if not self._got_motion_notify:
         # a plain click with no drag: clear the selection and seek to the
         # clicked position
         self.timeline.setSelectionTo(set(), 0)
         seeker.seek(Zoomable.pixelToNs(event.x))
     else:
         self._got_motion_notify = False
         # pick the selection mode from the modifier keys; CONTROL takes
         # precedence over SHIFT when both are held
         mode = 0
         if event.get_state() & gtk.gdk.SHIFT_MASK:
             mode = 1
         if event.get_state() & gtk.gdk.CONTROL_MASK:
             mode = 2
         self.timeline.setSelectionTo(self._objectsUnderMarquee(), mode)
     return True
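The modifier handling above can be factored into a small pure function, which makes the precedence explicit. The sketch below is illustrative only; the meaning of modes 0, 1 and 2 is whatever timeline.setSelectionTo assigns to them, which is not visible in this snippet:

def selection_mode(shift_held, control_held):
    # CONTROL takes precedence over SHIFT, matching the order of the two
    # `if` statements in _selectionEnd above
    if control_held:
        return 2
    if shift_held:
        return 1
    return 0

assert selection_mode(False, False) == 0
assert selection_mode(True, False) == 1
assert selection_mode(True, True) == 2   # both held: CONTROL wins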
Example #5
 def tdur(self):
     # duration, in nanoseconds, spanned by a single thumbnail of width
     # twidth pixels at the current zoom level
     return Zoomable.pixelToNs(self.twidth)
Example #6
 def set_pos(self, item, pos):
     # seek to the timestamp corresponding to the item's x coordinate
     self._canvas.app.current.seeker.seek(
         Zoomable.pixelToNs(pos[0]))
Example #7
 def set_pos(self, item, pos):
     x, y = pos
     # translate from widget coordinates to absolute timeline coordinates by
     # adding the horizontal scroll offset before converting pixels to time
     x += self._hadj.get_value()
     self._canvas.app.current.seeker.seek(Zoomable.pixelToNs(x))
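This set_pos differs from the simpler variant shown earlier only by the scroll correction: the horizontal adjustment's current value is added to x before the pixel-to-nanosecond conversion, so a drag performed while the timeline is scrolled seeks to the right absolute position. A minimal sketch, assuming a zoom of 1 px == 1 ms and using a plain number in place of the gtk.Adjustment:

NS_PER_PIXEL = 1_000_000     # assumed zoom: 1 pixel == 1 millisecond

def pixel_to_ns(pixels):
    return int(pixels * NS_PER_PIXEL)

def seek_target(x_in_widget, hscroll_value):
    # translate from widget coordinates to absolute timeline coordinates
    # before converting pixels to a timestamp
    return pixel_to_ns(x_in_widget + hscroll_value)

# pointing 40 px into the widget while scrolled 500 px to the right should
# seek to the 540 ms mark, not the 40 ms mark
assert seek_target(40, 500) == 540 * NS_PER_PIXEL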
Example #8
 def _segment_for_time(self, time):
     # for audio files, we need to know the duration the segment spans
     return time, Zoomable.pixelToNs(self.twidth)
Example #9
 def set_pos(self, item, pos):
     # same as the earlier variant that takes pos[0] directly, with no
     # scroll-offset correction
     self._canvas.app.current.seeker.seek(Zoomable.pixelToNs(pos[0]))