Exemple #1
0
    def _stopRecordingFull(self, handle, location, lastTstamp, delayedStop):
        """Flush and detach handle from the fdsink and register the
        finished recording at location in the UI file list.

        @param handle:      open file object being recorded to, or None
        @param location:    path of the recording on disk
        @param lastTstamp:  timestamp stored in the file list entry
        @param delayedStop: when true, leave the 'filename'/'recording'
                            UI keys and the last-recording symlink alone
        """
        sink = self.get_element('fdsink')
        # raise the sink out of NULL before emitting 'remove'
        if sink.get_state() == gst.STATE_NULL:
            sink.set_state(gst.STATE_READY)

        if handle:
            handle.flush()
            sink.emit('remove', handle.fileno())
            self._recordingStopped(handle, location)
            handle = None
            if not delayedStop:
                self.uiState.set('filename', None)
                self.uiState.set('recording', False)
            try:
                size = formatting.formatStorage(os.stat(location).st_size)
            # 'except X as e' replaces the Python-2-only 'except X, e'
            except EnvironmentError as e:
                self.debug("Failed to stat %s: %s", location,
                           log.getExceptionMessage(e))
                # catch File not found, permission denied, disk problems
                size = "unknown"

            # Limit number of entries on filelist, remove the oldest entry
            fl = self.uiState.get('filelist', otherwise=[])
            if FILELIST_SIZE == len(fl):
                self.uiState.remove('filelist', fl[0])

            self.uiState.append('filelist', (lastTstamp, location, size))

            if not delayedStop and self._symlinkToLastRecording:
                self._updateSymlink(location, self._symlinkToLastRecording)
Exemple #2
0
    def _stopRecordingFull(self, handle, location, lastTstamp, delayedStop):
        """Flush and detach handle from the fdsink and register the
        finished recording at location in the UI file list.

        When delayedStop is true, the 'filename'/'recording' UI keys and
        the last-recording symlink are left untouched.
        """
        sink = self.get_element('fdsink')
        # raise the sink out of NULL before emitting 'remove'
        if sink.get_state() == gst.STATE_NULL:
            sink.set_state(gst.STATE_READY)

        if handle:
            handle.flush()
            sink.emit('remove', handle.fileno())
            self._recordingStopped(handle, location)
            handle = None
            if not delayedStop:
                self.uiState.set('filename', None)
                self.uiState.set('recording', False)
            try:
                size = formatting.formatStorage(os.stat(location).st_size)
            except EnvironmentError, e:
                self.debug("Failed to stat %s: %s", location,
                      log.getExceptionMessage(e))
                # catch File not found, permission denied, disk problems
                size = "unknown"

            # Limit number of entries on filelist, remove the oldest entry
            fl = self.uiState.get('filelist', otherwise=[])
            if FILELIST_SIZE == len(fl):
                self.uiState.remove('filelist', fl[0])

            self.uiState.append('filelist', (lastTstamp,
                                             location,
                                             size))

            if not delayedStop and self._symlinkToLastRecording:
                self._updateSymlink(location,
                                    self._symlinkToLastRecording)
Exemple #3
0
    def get_pipeline_string(self, properties):
        """Configure file rotation from the component properties, expose
        the policy in the 'rotate-type' UI key and return the pipeline
        template."""
        self.directory = properties['directory']

        self.fixRenamedProperties(properties, [('rotateType', 'rotate-type')])

        rotation = properties.get('rotate-type', 'none')

        # act on the rotation policy and build the UI label for it
        if rotation == 'size':
            limit = properties['size']
            self.setSizeRotate(limit)
            label = 'every %sB' % formatting.formatStorage(limit)
        elif rotation == 'time':
            period = properties['time']
            self.setTimeRotate(period)
            label = 'every %s' % formatting.formatTime(period)
        else:
            label = 'disabled'
        self.uiState.set('rotate-type', label)
        # FIXME: should add a way of saying "do first cycle at this time"

        return self.pipe_template
Exemple #4
0
    def stopRecording(self):
        """Stop the current recording, if any.

        Flushes and detaches self.file from the fdsink, records the
        finished file in the 'filelist' uiState (bounded to
        FILELIST_SIZE entries) and refreshes the last-recording symlink.
        """
        sink = self.get_element('fdsink')
        # raise the sink out of NULL before emitting 'remove'
        if sink.get_state() == gst.STATE_NULL:
            sink.set_state(gst.STATE_READY)

        if self.file:
            self.file.flush()
            sink.emit('remove', self.file.fileno())
            self._recordingStopped(self.file, self.location)
            self.file = None
            self.uiState.set('filename', None)
            self.uiState.set('recording', False)
            try:
                size = format.formatStorage(os.stat(self.location).st_size)
            # the old 'except EnvironmentError, e' is Python-2-only and
            # bound a name it never used
            except EnvironmentError:
                # catch File not found, permission denied, disk problems
                size = "unknown"

            # Limit number of entries on filelist, remove the oldest entry
            fl = self.uiState.get('filelist', otherwise=[])
            if FILELIST_SIZE == len(fl):
                self.uiState.remove('filelist', fl[0])

            self.uiState.append('filelist', (self.last_tstamp,
                                             self.location,
                                             size))

            if self._symlinkToLastRecording:
                self._updateSymlink(self.location,
                                    self._symlinkToLastRecording)
Exemple #5
0
    def get_pipeline_string(self, properties):
        """Configure file rotation from the component properties, expose
        the policy in the 'rotate-type' UI key and return the pipeline
        template."""
        directory = properties['directory']

        self.directory = directory

        self.fixRenamedProperties(properties, [('rotateType', 'rotate-type')])

        rotateType = properties.get('rotate-type', 'none')

        # now act on the properties
        if rotateType == 'size':
            self.setSizeRotate(properties['size'])
            self.uiState.set('rotate-type',
                             'every %sB' % \
                             formatting.formatStorage(properties['size']))
        elif rotateType == 'time':
            self.setTimeRotate(properties['time'])
            self.uiState.set('rotate-type',
                             'every %s' % \
                             formatting.formatTime(properties['time']))
        else:
            self.uiState.set('rotate-type', 'disabled')
        # FIXME: should add a way of saying "do first cycle at this time"

        return self.pipe_template
Exemple #6
0
    def update_mem(self, size):
        """Show memory use as text and as a fraction of self.total_mem.

        @param size: memory use in bytes, or a false value when unknown
        """
        if not size:
            size = _('Unknown')
            fraction = 0
        else:
            # true division: with integer inputs, size / self.total_mem
            # floored to 0 under Python 2 whenever size < total_mem,
            # leaving the fraction bar permanently empty
            fraction = float(size) / self.total_mem
            size = '%sB' % formatStorage(size)

        self.mem.set_text(size)
        self.mem.set_fraction(fraction)
Exemple #7
0
    def update_mem(self, size):
        """Show memory use as text and as a fraction of self.total_mem.

        NOTE(review): size / self.total_mem truncates to 0 under Python 2
        integer division when size < total_mem — confirm intended.
        """
        if not size:
            size = _('Unknown')
            fraction = 0
        else:
            fraction = size / self.total_mem
            size = '%sB' % formatting.formatStorage(size)

        self.mem.set_text(size)
        self.mem.set_fraction(fraction)
Exemple #8
0
    def updateState(self, set):
        """Publish stream and client statistics through the set callback.

        NOTE(review): divides by uptime; a zero uptime would raise
        ZeroDivisionError — confirm callers only invoke this once the
        stream has been up.
        """
        c = self

        bytes_sent = c.getBytesSent()
        bytes_received = c.getBytesReceived()
        uptime = c.getUptime()

        set("stream-mime", c.get_mime())
        set("stream-url", c.getUrl())
        set("stream-uptime", formatting.formatTime(uptime))
        bitspeed = bytes_received * 8 / uptime
        currentbitrate = self.getCurrentBitrate()
        set("stream-bitrate", formatting.formatStorage(bitspeed) + "bit/s")
        set("stream-current-bitrate", formatting.formatStorage(currentbitrate) + "bit/s")
        set("stream-totalbytes", formatting.formatStorage(bytes_received) + "Byte")
        set("stream-bitrate-raw", bitspeed)
        set("stream-totalbytes-raw", bytes_received)

        set("clients-current", str(c.getClients()))
        set("clients-max", str(c.getMaxClients()))
        set("clients-peak", str(c.getPeakClients()))
        set("clients-peak-time", c.getPeakEpoch())
        set("clients-average", str(int(c.getAverageClients())))

        # outgoing side: what was actually sent to clients
        bitspeed = bytes_sent * 8 / uptime
        set("consumption-bitrate", formatting.formatStorage(bitspeed) + "bit/s")
        set("consumption-bitrate-current", formatting.formatStorage(currentbitrate * c.getClients()) + "bit/s")
        set("consumption-totalbytes", formatting.formatStorage(bytes_sent) + "Byte")
        set("consumption-bitrate-raw", bitspeed)
        set("consumption-totalbytes-raw", bytes_sent)
Exemple #9
0
    def updateState(self, set):
        """Publish stream and client statistics through the set callback."""
        sent = self.getBytesSent()
        received = self.getBytesReceived()
        uptime = self.getUptime()

        set('stream-mime', self.get_mime())
        set('stream-url', self.getUrl())
        set('stream-uptime', formatTime(uptime))
        inrate = received * 8 / uptime
        current = self.getCurrentBitrate()
        set('stream-bitrate', formatStorage(inrate) + 'bit/s')
        set('stream-current-bitrate',
            formatStorage(current) + 'bit/s')
        set('stream-totalbytes', formatStorage(received) + 'Byte')
        set('stream-bitrate-raw', inrate)
        set('stream-totalbytes-raw', received)

        set('clients-current', str(self.getClients()))
        set('clients-max', str(self.getMaxClients()))
        set('clients-peak', str(self.getPeakClients()))
        set('clients-peak-time', self.getPeakEpoch())
        set('clients-average', str(int(self.getAverageClients())))

        # outgoing side: what was actually sent to clients
        outrate = sent * 8 / uptime
        set('consumption-bitrate', formatStorage(outrate) + 'bit/s')
        set('consumption-bitrate-current',
            formatStorage(current * self.getClients()) + 'bit/s')
        set('consumption-totalbytes', formatStorage(sent) + 'Byte')
        set('consumption-bitrate-raw', outrate)
        set('consumption-totalbytes-raw', sent)
Exemple #10
0
    def allocateCacheSpace(self, size):
        """
        Try to reserve cache space.

        If there is not enough space and the cache cleanup is enabled,
        it will delete files from the cache starting with the ones
        with oldest access time until the cache usage drops below
        the fraction specified by the property cleanup-low-threshold.

        Returns a 'tag' that should be used to 'free' the cache space
        using releaseCacheSpace.
        This tag is needed to better estimate the cache usage,
        if the cache usage has been updated since cache space
        has been allocated, freeing up the space should not change
        the cache usage estimation.

        @param size: size to reserve, in bytes
        @type  size: int

        @returns: an allocation tag or None if the allocation failed.
        @rtype:   tuple
        """
        usage = self.updateCacheUsage()
        if (usage + size) < self._cacheMaxUsage:
            # fast path: the request fits in the cache as-is
            self._cacheUsage += size
            self.updateCacheUsageStatistics()
            return (self._cacheUsageLastUpdate, size)

        self.debug('cache usage will be %sbytes, need more cache',
            format.formatStorage(usage + size))

        if not self._cleanupEnabled:
            self.debug('not allowed to clean up cache, so cannot cache')
            # No space available and cleanup disabled: allocation failed.
            return None

        # Update cleanup statistics
        self.stats.onCleanup()
        # List the cached files with file state
        os.chdir(self._cacheDir)

        files = []
        for f in os.listdir('.'):
            # There's a possibility of getting an error on os.stat here. See
            # similar comment in updateCacheUsage()
            try:
                files.append((f, os.stat(f)))
            # 'except X as e' replaces the Python-2-only 'except X, e'
            except OSError as e:
                if e.errno == errno.ENOENT:
                    # the file vanished between listdir and stat; skip it
                    pass
                else:
                    raise
Exemple #11
0
    def do(self, args):
        """Check the biggest job process vsize against the warning and
        critical thresholds and return the matching nagios-style result."""
        processes = getProcesses(prefix=self.prefix)
        if not processes:
            return util.ok('No job processes running.')

        # (pid, vsize) pairs, biggest vsize first
        jobs = [(p.pid, p.vsize) for p in processes.values()]
        jobs.sort(key=lambda t: t[1])
        jobs.reverse()

        # the hungriest process decides the overall status
        pid, vsize = jobs[0]

        warning = parseSize(self.options.warning)
        critical = parseSize(self.options.critical)

        if vsize >= critical:
            over = [t for t in jobs if t[1] >= critical]
            return util.critical(
                '%d %s(s) above critical level - highest is %d at %s' %
                (len(over), self.process_type, pid,
                 formatting.formatStorage(vsize)))

        if vsize >= warning:
            over = [t for t in jobs if t[1] >= warning]
            return util.warning(
                '%d %s(s) above warning level - highest is %d at %s' %
                (len(over), self.process_type, pid,
                 formatting.formatStorage(vsize)))

        return util.ok(
            'No %s processes above warning level '
            '(highest is %d at %s)' %
            (self.process_type, pid, formatting.formatStorage(vsize)))
Exemple #12
0
    def do(self, args):
        """Check the biggest job process vsize against the warning and
        critical thresholds and return the matching nagios-style result."""
        # get a list of pid, vsize and sort on vsize in reverse order
        l = []
        processes = getProcesses(prefix=self.prefix)
        if not processes:
            return util.ok('No job processes running.')

        for process in processes.values():
            l.append((process.pid, process.vsize))

        l.sort(key=lambda t: t[1])
        l.reverse()

        # check the one with the mostest
        pid, vsize = l[0]

        warning = parseSize(self.options.warning)
        critical = parseSize(self.options.critical)

        if vsize >= critical:
            # count number of critical jobs
            which = [t for t in l if t[1] >= critical]
            return util.critical(
                '%d %s(s) above critical level - highest is %d at %s' % (
                    len(which), self.process_type, pid,
                    formatting.formatStorage(vsize)))

        if vsize >= warning:
            # count number of warning jobs
            which = [t for t in l if t[1] >= warning]
            return util.warning(
                '%d %s(s) above warning level - highest is %d at %s' % (
                    len(which), self.process_type, pid,
                    formatting.formatStorage(vsize)))

        return util.ok('No %s processes above warning level '
            '(highest is %d at %s)' % (
                self.process_type, pid, formatting.formatStorage(vsize)))
    def configure_pipeline(self, pipeline, properties):
        """Forward encoder-related properties to the 'encoder' element
        and expose its complexity and bitrate in the UI state."""
        element = pipeline.get_by_name('encoder')
        # dict.has_key() is deprecated (removed in Python 3); use 'in'.
        # The repeated if-blocks collapse into one loop over the names.
        for prop in ('bitrate', 'complexity', 'sharpness',
                     'key-frame-distance', 'buffer-delay'):
            if prop in properties:
                element.set_property(prop, properties[prop])

        self.uiState.set('complexity', element.get_property('complexity'))
        self.uiState.set('bitrate',
                         formatStorage(element.get_property('bitrate')) +
                         'bit/s')
Exemple #14
0
    def get_pipeline_string(self, properties):
        """Validate and apply the rotation properties, then return the
        pipeline template.

        Raises errors.ComponentSetupHandledError (after posting an error
        message) when 'rotate-type' is invalid or its matching
        size/time value is missing.
        """
        directory = properties['directory']

        self.directory = directory

        self.fixRenamedProperties(properties, [('rotateType', 'rotate-type')])

        rotateType = properties.get('rotate-type', 'none')

        # validate rotate-type and size/time properties first
        if not rotateType in ['none', 'size', 'time']:
            # fixed missing opening quote around 'time' in the message
            m = messages.Error(T_(N_(
                "The configuration property 'rotate-type' should be set to "
                "'size', 'time', or 'none', not '%s'. "
                "Please fix the configuration."),
                    rotateType), mid='rotate-type')
            self.addMessage(m)
            raise errors.ComponentSetupHandledError()

        # size and time types need the property specified
        if rotateType in ['size', 'time']:
            if rotateType not in properties.keys():
                m = messages.Error(T_(N_(
                    "The configuration property '%s' should be set. "
                    "Please fix the configuration."),
                        rotateType), mid='rotate-type')
                self.addMessage(m)
                raise errors.ComponentSetupHandledError()

        # now act on the properties
        if rotateType == 'size':
            self.setSizeRotate(properties['size'])
            self.uiState.set('rotate-type',
                             'every %sB' % \
                             format.formatStorage(properties['size']))
        elif rotateType == 'time':
            self.setTimeRotate(properties['time'])
            self.uiState.set('rotate-type',
                             'every %s' % \
                             format.formatTime(properties['time']))
        else:
            self.uiState.set('rotate-type', 'disabled')
        # FIXME: should add a way of saying "do first cycle at this time"

        return self.pipe_template
Exemple #15
0
    def _allocateCacheSpace(self, usage, size):
        """Try to reserve size bytes of cache space.

        Fires (lastUpdate, size) on success, None when the cache is full
        and cleanup is disabled; otherwise runs a cleanup first and
        retries through _allocateCacheSpaceAfterCleanUp.
        """
        if usage + size < self._cacheMaxUsage:
            # enough room already: account for it and succeed right away
            self._cacheUsage += size
            self.updateCacheUsageStatistics()
            return defer.succeed((self._cacheUsageLastUpdate, size))

        self.debug('cache usage will be %sbytes, need more cache',
            formatting.formatStorage(usage + size))

        if not self._cleanupEnabled:
            # No space available and cleanup disabled: allocation failed.
            self.debug('not allowed to clean up cache, '
                       'so cannot cache %d' % size)
            return defer.succeed(None)

        cleanup = self._cleanUp()
        cleanup.addCallback(self._allocateCacheSpaceAfterCleanUp, size)
        return cleanup
Exemple #16
0
    def updateState(self, set):
        """Publish stream and client statistics through the set callback.

        NOTE(review): divides by uptime; a zero uptime would raise
        ZeroDivisionError — confirm callers only invoke this once the
        stream has been up.
        """
        c = self

        bytes_sent = c.getBytesSent()
        bytes_received = c.getBytesReceived()
        uptime = c.getUptime()

        set('stream-mime', c.get_mime())
        set('stream-url', c.getUrl())
        set('stream-uptime', formatting.formatTime(uptime))
        bitspeed = bytes_received * 8 / uptime
        currentbitrate = self.getCurrentBitrate()
        set('stream-bitrate', formatting.formatStorage(bitspeed) + 'bit/s')
        set('stream-current-bitrate',
            formatting.formatStorage(currentbitrate) + 'bit/s')
        set('stream-totalbytes',
            formatting.formatStorage(bytes_received) + 'Byte')
        set('stream-bitrate-raw', bitspeed)
        set('stream-totalbytes-raw', bytes_received)

        set('clients-current', str(c.getClients()))
        set('clients-max', str(c.getMaxClients()))
        set('clients-peak', str(c.getPeakClients()))
        set('clients-peak-time', c.getPeakEpoch())
        set('clients-average', str(int(c.getAverageClients())))

        # outgoing side: what was actually sent to clients
        bitspeed = bytes_sent * 8 / uptime
        set('consumption-bitrate',
            formatting.formatStorage(bitspeed) + 'bit/s')
        set(
            'consumption-bitrate-current',
            formatting.formatStorage(currentbitrate * c.getClients()) +
            'bit/s')
        set('consumption-totalbytes',
            formatting.formatStorage(bytes_sent) + 'Byte')
        set('consumption-bitrate-raw', bitspeed)
        set('consumption-totalbytes-raw', bytes_sent)
Exemple #17
0
 def testGibibyte(self):
     """1 GiB formats as '1.0737 G' with 4 digits of precision."""
     value = 1024 * 1024 * 1024
     # assertEqual replaces the deprecated assertEquals alias
     self.assertEqual(formatting.formatStorage(value, 4), "1.0737 G")
Exemple #18
0
 def _updateVSize(self, vsize):
     """Show vsize (an int in bytes) on the vsize label, or a translated
     'Unknown' when it is falsy."""
     if vsize:
         text = '%sB' % formatting.formatStorage(vsize)
     else:
         text = _('Unknown')
     self._label_vsize.set_text(text)
Exemple #19
0
 def _setEaterBytesReadCurrent(self, state, value):
     """Update the 'bytes-read-current' label and the connection timers.

     @param value: byte count, or a false value to clear the label
     """
     # conditional expression instead of the fragile 'cond and a or b'
     # idiom, which silently misbehaves if the middle operand is falsy
     txt = (formatStorage(value) + _('Byte')) if value else ''
     self.labels['bytes-read-current'].set_text(txt)
     self._updateConnectionTime()
     self._updateDisconnectionTime()
Exemple #20
0
 def setFeederClientBytesReadTotal(self, state, value):
     """Show the feeder client's total bytes read in its label.

     @param value: byte count, or a false value to clear the label
     """
     # conditional expression instead of the fragile 'cond and a or b'
     # idiom, which silently misbehaves if the middle operand is falsy
     txt = (formatting.formatStorage(value) + _('Byte')) if value else ''
     self.labels['bytes-read-total'].set_text(txt)
Exemple #21
0
 def testTebibyte(self):
     """1 TiB formats as '1.0995 T' with 4 digits of precision."""
     value = 1024 * 1024 * 1024 * 1024
     # use unittest's assertEqual instead of a bare assert so failures
     # report both values
     self.assertEqual(formatStorage(value, 4), "1.0995 T")
 def testPebibyte(self):
     """1 PiB formats as '1.1259 P' with 4 digits of precision."""
     value = 1024 * 1024 * 1024 * 1024 * 1024
     # assertEqual replaces the deprecated assertEquals alias
     self.assertEqual(formatting.formatStorage(value, 4), "1.1259 P")
Exemple #23
0
 def testExbibyte(self):
     """1 EiB formats as '1.1529 E' with 4 digits of precision."""
     value = 1024 * 1024 * 1024 * 1024 * 1024 * 1024
     # use unittest's assertEqual instead of a bare assert so failures
     # report both values
     self.assertEqual(formatStorage(value, 4), "1.1529 E")
Exemple #24
0
 def testPebibyte(self):
     """1 PiB formats as '1.1259 P' with 4 digits of precision."""
     value = 1024 * 1024 * 1024 * 1024 * 1024
     # assertEqual replaces the deprecated assertEquals alias
     self.assertEqual(formatting.formatStorage(value, 4), "1.1259 P")
Exemple #25
0
                    pass
                else:
                    return defer.fail(e)

        # Calculate the cached file total size
        usage = sum([d[1].st_size for d in files])
        # Delete the cached file starting by the oldest accessed ones
        files.sort(key=lambda d: d[1].st_atime)
        rmlist = []
        for path, info in files:
            usage -= info.st_size
            rmlist.append(path)
            if usage <= self._cacheMinUsage:
                # We reach the cleanup limit
                self.debug('cleaned up, cache use is now %sbytes',
                    format.formatStorage(usage))
                break
        d = threads.deferToThread(self._rmfiles, rmlist)
        d.addBoth(self._setCacheUsage, usage)
        return d

    def _allocateCacheSpaceAfterCleanUp(self, usage, size):
        if (self._cacheUsage + size) >= self._cacheSize:
            # There is not enough space, allocation failed
            self.updateCacheUsageStatistics()
            self.debug('not enough space in cache, '
                       'cannot cache %d > %d' %
                       (self._cacheUsage + size, self._cacheSize))
            return None

        # There is enough space to allocate, allocation succeed
Exemple #26
0
 def testMegabyte(self):
     """1 MB (10^6 bytes) formats as '1.00 M' by default."""
     value = 1000 * 1000
     # assertEqual replaces the deprecated assertEquals alias
     self.assertEqual(formatting.formatStorage(value), "1.00 M")
Exemple #27
0
 def testBytes(self):
     """A small byte count formats with no unit prefix ('4.00 ')."""
     value = 4
     # assertEqual replaces the deprecated assertEquals alias
     self.assertEqual(formatting.formatStorage(value), "4.00 ")
Exemple #28
0
class Disker(feedcomponent.ParseLaunchComponent, log.Loggable):
    """Component that records an incoming stream to files on disk.

    Feeds output file descriptors to a single multifdsink ('fdsink');
    supports size/time based rotation, optional iCal scheduling, index
    writing and stream-marker handling (see configure_pipeline).
    """
    logCategory = "disker"

    componentMediumClass = DiskerMedium
    checkOffset = True
    pipe_template = 'multifdsink name=fdsink sync-method=2 mode=1 sync=false'
    file = None  # currently open recording file object
    directory = None  # target directory, from the 'directory' property
    location = None  # path of the current recording
    caps = None
    last_tstamp = None  # timestamp of the current recording's start
    indexLocation = None
    writeIndex = False
    syncOnTdt = False
    timeOverlap = 0
    reactToMarks = False

    _offset = 0L
    _headers_size = 0
    _index = None
    _nextIsKF = False
    _lastTdt = None
    _lastEntry = None  # last KF/TDT
    _clients = {}  # dict of clients {fd: (index, synced)}
    _startFilenameTemplate = None  # template to use when starting off recording
    _startTime = None  # time of event when starting
    _rotateTimeDelayedCall = None
    _pollDiskDC = None  # _pollDisk delayed calls
    _symlinkToLastRecording = None
    _symlinkToCurrentRecording = None

    #   see the commented out import statement for IStateCacheableListener at
    #   the beginning of this file
    # implements(IStateCacheableListener)

    ### BaseComponent methods

    def init(self):
        """Register uiState keys and create the (stopped) disk poller.

        Scheduling support requires both the icalendar and dateutil
        modules to be available.
        """
        self._can_schedule = (eventcalendar.HAS_ICALENDAR
                              and eventcalendar.HAS_DATEUTIL)
        self.uiState.addKey('filename', None)
        self.uiState.addKey('recording', False)
        self.uiState.addKey('can-schedule', self._can_schedule)
        self.uiState.addKey('has-schedule', False)
        self.uiState.addKey('rotate-type', None)
        self.uiState.addKey('disk-free', None)
        # list of (dt (in UTC, without tzinfo), which, content)
        self.uiState.addListKey('next-points')
        self.uiState.addListKey('filelist')

        # polls free disk space at DISKPOLL_FREQ intervals; started
        # lazily when the first observer appears (see observerAppend)
        self._diskPoller = poller.Poller(self._pollDisk,
                                         DISKPOLL_FREQ,
                                         start=False)

    ### uiState observer triggers

    def observerAppend(self, observer, num):
        """Start disk polling when a UI observer begins watching us."""
        # PB may not have finished setting up its state and doing a
        # remoteCall immediately here may cause some problems to the other
        # side. For us to send the initial disk usage value with no
        # noticeable delay, we will do it in a callLater with a timeout
        # value of 0
        self.debug("observer has started watching us, starting disk polling")
        if not self._diskPoller.running and not self._pollDiskDC:
            self._pollDiskDC = reactor.callLater(0,
                                                 self._diskPoller.start,
                                                 immediately=True)
        # Start the BaseComponent pollers
        feedcomponent.ParseLaunchComponent.observerAppend(self, observer, num)

    def observerRemove(self, observer, num):
        """Stop disk polling once the last UI observer goes away."""
        if num == 0:
            # cancel delayed _pollDisk calls if there's any
            if self._pollDiskDC:
                self._pollDiskDC.cancel()
                self._pollDiskDC = None

            self.debug("no more observers left, shutting down disk polling")
            self._diskPoller.stop()
        # Stop the BaseComponent pollers
        feedcomponent.ParseLaunchComponent.observerRemove(self, observer, num)

    def check_properties(self, props, addMessage):
        """Validate the rotation-related configuration properties.

        Raises errors.ConfigError (after posting an error message via
        addMessage) when 'rotate-type' is invalid, or when the matching
        'size'/'time' property is missing or zero.
        """
        # NOTE: the props argument is ignored; properties are re-read
        # from the component config
        props = self.config['properties']
        rotateType = props.get('rotate-type', 'none')

        if not rotateType in ['none', 'size', 'time']:
            # fixed missing opening quote around 'time' in the message
            msg = messages.Error(T_(
                N_("The configuration property 'rotate-type' should be set to "
                   "'size', 'time', or 'none', not '%s'. "
                   "Please fix the configuration."), rotateType),
                                 mid='rotate-type')
            addMessage(msg)
            raise errors.ConfigError(msg)

        if rotateType in ['size', 'time']:
            if rotateType not in props.keys():
                msg = messages.Error(T_(
                    N_("The configuration property '%s' should be set. "
                       "Please fix the configuration."), rotateType),
                                     mid='rotate-type')
                addMessage(msg)
                raise errors.ConfigError(msg)

            if props[rotateType] == 0:
                msg = messages.Error(T_(N_("Configuration error: " \
                    "'rotate-type' %s value cannot be set to 0."),
                    rotateType), mid='rotate-type')
                addMessage(msg)
                raise errors.ConfigError(msg)

    ### ParseLaunchComponent methods

    def get_pipeline_string(self, properties):
        """Configure file rotation from the component properties, expose
        the policy in the 'rotate-type' UI key and return the pipeline
        template."""
        directory = properties['directory']

        self.directory = directory

        self.fixRenamedProperties(properties, [('rotateType', 'rotate-type')])

        rotateType = properties.get('rotate-type', 'none')

        # now act on the properties
        if rotateType == 'size':
            self.setSizeRotate(properties['size'])
            self.uiState.set('rotate-type',
                             'every %sB' % \
                             formatting.formatStorage(properties['size']))
        elif rotateType == 'time':
            self.setTimeRotate(properties['time'])
            self.uiState.set('rotate-type',
                             'every %s' % \
                             formatting.formatTime(properties['time']))
        else:
            self.uiState.set('rotate-type', 'disabled')
        # FIXME: should add a way of saying "do first cycle at this time"

        return self.pipe_template

    def configure_pipeline(self, pipeline, properties):
        """Read the component properties and wire up the fdsink.

        Handles symlink targets, start-recording, the filename template,
        optional iCal scheduling, index writing, TDT syncing and
        stream-marker handling.

        Raises errors.ComponentSetupHandledError when an iCal schedule
        is requested but icalendar/dateutil are not installed.
        """
        self.debug('configure_pipeline for disker')
        self._clock = pipeline.get_clock()
        self._symlinkToLastRecording = \
            properties.get('symlink-to-last-recording', None)
        self._symlinkToCurrentRecording = \
            properties.get('symlink-to-current-recording', None)
        self._recordAtStart = properties.get('start-recording', True)
        self._defaultFilenameTemplate = properties.get(
            'filename', '%s.%%Y%%m%%d-%%H%%M%%S' % self.getName())
        self._startFilenameTemplate = self._defaultFilenameTemplate
        icalfn = properties.get('ical-schedule')
        if self._can_schedule and icalfn:
            self.scheduleRecordings(open(icalfn, 'r'))
        elif icalfn:
            # ical schedule is set, but self._can_schedule is False

            def missingModule(moduleName):
                # post an error message pointing at the missing module
                m = messages.Error(T_(
                    N_("An iCal file has been specified for scheduling, "
                       "but the '%s' module is not installed.\n"), moduleName),
                                   mid='error-python-%s' % moduleName)
                documentation.messageAddPythonInstall(m, moduleName)
                self.debug(m)
                self.addMessage(m)

            if not eventcalendar.HAS_ICALENDAR:
                missingModule('icalendar')
            if not eventcalendar.HAS_DATEUTIL:
                missingModule('dateutil')
            # self._can_schedule is False, so one of the above surely happened
            raise errors.ComponentSetupHandledError()

        self.writeIndex = properties.get('write-index', False)
        self.reactToMarks = properties.get('react-to-stream-markers', False)
        self.syncOnTdt = properties.get('sync-on-tdt', False)
        self.timeOverlap = properties.get('time-overlap', 0)

        sink = self.get_element('fdsink')

        if gstreamer.element_factory_has_property('multifdsink',
                                                  'resend-streamheader'):
            sink.set_property('resend-streamheader', False)
        else:
            self.debug("resend-streamheader property not available, "
                       "resending streamheader when it changes in the caps")
        sink.get_pad('sink').connect('notify::caps', self._notify_caps_cb)
        # connect to client-removed so we can detect errors in file writing
        sink.connect('client-removed', self._client_removed_cb)

        if self.writeIndex:
            sink.connect('client-added', self._client_added_cb)

        if self.reactToMarks:
            pfx = properties.get('stream-marker-filename-prefix', '%03d.')
            self._markerPrefix = pfx

        # a pad probe feeds marker/index/TDT handling from the stream
        if self.reactToMarks or self.writeIndex or self.syncOnTdt:
            sink.get_pad("sink").add_data_probe(self._src_pad_probe)

    ### our methods

    def _tdt_to_datetime(self, s):
        '''
        TDT events contain a structure representing the UTC time of the
        stream with the fields:
        'year', 'month', 'day', 'hour', 'minute', 'second'

        Can raise an Exception if the structure doesn't contain all the
        required fields; protect with try-except.

        Returns None for non-'tdt' structures, otherwise the time as
        seconds since the epoch (via time.mktime).
        '''
        if s.get_name() != 'tdt':
            return None
        t = dt.datetime(s['year'], s['month'], s['day'], s['hour'],
                        s['minute'], s['second'])
        return time.mktime(t.timetuple())

    def _getStats(self, fd):
        """Return multifdsink 'get-stats' for fd, or None if unusable.

        multifdsink versions up to and including 0.10.30 do not report
        the buffer timestamps needed for indexing; in that case a
        warning message is posted, writeIndex is disabled and None is
        returned.
        """
        sink = self.get_element('fdsink')
        stats = sink.emit('get-stats', fd)
        if len(stats) <= 6:
            self.warning("The current version of multifdsink doesn't "
                         "include the timestamp of the first and last "
                         "buffers sent: the indexing will be disabled.")
            m = messages.Warning(
                T_(
                    N_("Versions up to and including %s of the '%s' "
                       "GStreamer plug-in can't be used to write index "
                       "files.\n"), '0.10.30', 'multifdsink'))
            self.addMessage(m)
            self.writeIndex = False
            return None
        return stats

    def _updateIndex(self, offset, timestamp, isKeyframe, tdt=0):
        """Add an index entry for every client, syncing unsynced clients
        from the fd stats first; remember the entry in _lastEntry."""
        for fd, val in self._clients.items():
            index, synced = val
            if not synced:
                stats = self._getStats(fd)
                # Check if multifdsink can be used for indexing
                if not stats:
                    return
                # Very unlikely, but if we are not synced yet,
                # add this entry to the index because it's going
                # to be the sync point, and continue
                if stats[6] == gst.CLOCK_TIME_NONE:
                    index.addEntry(offset, timestamp, isKeyframe, tdt, False)
                    continue
                # if we know when the client was synced, trim the index.
                index.updateStart(stats[6])
                self._clients[fd] = (index, True)
                # At this point we should have only one entry in the index
                # which will be written to file after getting the next sync
                # buffer and we can update its duration and length.

            index.addEntry(offset, timestamp, isKeyframe, tdt, True)
        self._lastEntry = (offset, timestamp, isKeyframe, tdt)

    def _pollDisk(self):
        """Poll free disk space in the recording directory and publish
        it on the 'disk-free' uiState key when it changed."""
        # Figure out the remaining disk space where the disker is saving
        # files to
        self._pollDiskDC = None
        s = None
        try:
            s = os.statvfs(self.directory)
        # 'except X as e' replaces the Python-2-only 'except X, e';
        # broad on purpose: a failed poll is logged and reported as
        # unknown free space rather than crashing the poller
        except Exception as e:
            self.debug('failed to figure out disk space: %s',
                       log.getExceptionMessage(e))

        if not s:
            free = None
        else:
            free = formatting.formatStorage(s.f_frsize * s.f_bavail)

        if self.uiState.get('disk-free') != free:
            self.debug("disk usage changed, reporting to observers")
            self.uiState.set('disk-free', free)
Exemple #29
0
                    pass
                else:
                    return defer.fail(e)

        # Calculate the cached file total size
        usage = sum([d[1].st_size for d in files])
        # Delete the cached file starting by the oldest accessed ones
        files.sort(key=lambda d: d[1].st_atime)
        rmlist = []
        for path, info in files:
            usage -= info.st_size
            rmlist.append(path)
            if usage <= self._cacheMinUsage:
                # We reach the cleanup limit
                self.debug('cleaned up, cache use is now %sbytes',
                    formatting.formatStorage(usage))
                break
        d = threads.deferToThread(self._rmfiles, rmlist)
        d.addBoth(self._setCacheUsage, usage)
        return d

    def _allocateCacheSpaceAfterCleanUp(self, usage, size):
        if (self._cacheUsage + size) >= self._cacheSize:
            # There is not enough space, allocation failed
            self.updateCacheUsageStatistics()
            self.debug('not enough space in cache, '
                       'cannot cache %d > %d' %
                       (self._cacheUsage + size, self._cacheSize))
            return None

        # There is enough space to allocate, allocation succeed
 def _updateVSize(self, vsize):
     """Show vsize (an int in bytes) on the vsize label, or a translated
     'Unknown' when it is falsy."""
     # given int for vsize in bytes, update the label
     if not vsize:
         self._label_vsize.set_text(_('Unknown'))
     else:
         self._label_vsize.set_text('%sB' % formatting.formatStorage(vsize))
Exemple #31
0
def _formatBytes(value):
    """Return *value* (bytes) as a human-readable size with a 'Byte' suffix."""
    size = formatting.formatStorage(value)
    return size + _('Byte')
Exemple #32
0
 def testKibibyte(self):
     """1024 bytes formats as '1.02 k' by default, '1.024 k' with 3 digits."""
     kib = 1024
     self.assertEquals(formatting.formatStorage(kib), "1.02 k")
     self.assertEquals(formatting.formatStorage(kib, 3), "1.024 k")
 def testBytes(self):
     """Plain byte counts keep two decimals and a trailing space, no prefix."""
     plain = 4
     self.assertEquals(formatting.formatStorage(plain), "4.00 ")
Exemple #34
0
 def testMebibyte(self):
     """A binary megabyte formats as ~1.05 M with growing precision."""
     mib = 1024 * 1024
     self.assertEquals(formatting.formatStorage(mib), "1.05 M")
     self.assertEquals(formatting.formatStorage(mib, 3), "1.049 M")
     self.assertEquals(formatting.formatStorage(mib, 4), "1.0486 M")
 def testMegabyte(self):
     """A decimal megabyte (1000**2 bytes) formats as exactly '1.00 M'."""
     mb = 1000 * 1000
     self.assertEquals(formatting.formatStorage(mb), "1.00 M")
Exemple #36
0
 def testTebibyte(self):
     """A binary terabyte formats as ~1.0995 T at 4 decimal places."""
     tib = 1024 ** 4
     self.assertEquals(formatting.formatStorage(tib, 4), "1.0995 T")
 def testGibibyte(self):
     """A binary gigabyte formats as ~1.0737 G at 4 decimal places."""
     gib = 1024 ** 3
     self.assertEquals(formatting.formatStorage(gib, 4), "1.0737 G")
Exemple #38
0
 def testExbibyte(self):
     """A binary exabyte formats as ~1.1529 E at 4 decimal places."""
     eib = 1024 ** 6
     self.assertEquals(formatting.formatStorage(eib, 4), "1.1529 E")
Exemple #39
0
 def testPebibyte(self):
     """A binary petabyte formats as ~1.1259 P at 4 decimal places."""
     value = 1024 * 1024 * 1024 * 1024 * 1024
     # Match the sibling tests: use the qualified formatting.formatStorage
     # (the bare name is not in scope here) and assertEquals instead of a
     # plain assert, which `python -O` silently strips.
     self.assertEquals(formatting.formatStorage(value, 4), "1.1259 P")
Exemple #40
0
def _formatBitrate(value):
    """Return *value* as a human-readable rate with a 'bit/s' suffix."""
    rate = formatting.formatStorage(value)
    return rate + _('bit/s')
Exemple #41
0
def _formatBytes(value):
    """Human-readable storage string for *value* with a 'Byte' suffix."""
    return '%s%s' % (formatting.formatStorage(value), _('Byte'))
 def testKibibyte(self):
     """One kibibyte renders as '1.02 k', or '1.024 k' with three digits."""
     nbytes = 1 << 10
     self.assertEquals(formatting.formatStorage(nbytes), "1.02 k")
     self.assertEquals(formatting.formatStorage(nbytes, 3), "1.024 k")
Exemple #43
0
def _formatBitrate(value):
    """Human-readable bitrate string for *value* with a 'bit/s' suffix."""
    return '%s%s' % (formatting.formatStorage(value), _('bit/s'))
 def testMebibyte(self):
     """One mebibyte (2**20 bytes) formats as ~1.05 M at 2/3/4 digits."""
     nbytes = 1 << 20
     self.assertEquals(formatting.formatStorage(nbytes), "1.05 M")
     self.assertEquals(formatting.formatStorage(nbytes, 3), "1.049 M")
     self.assertEquals(formatting.formatStorage(nbytes, 4), "1.0486 M")
 def setFeederClientBytesReadCurrent(self, state, value):
     """Show current bytes read for a feeder client and refresh the timers."""
     # Conditional expression instead of the fragile `and/or` idiom.
     # Behavior is unchanged here because the formatted string is always
     # non-empty for a truthy value, but the ternary cannot silently break
     # if that ever changes.
     txt = (formatting.formatStorage(value) + _("Byte")) if value else ""
     self.labels["bytes-read-current"].set_text(txt)
     self.updateConnectionTime()
     self.updateDisconnectionTime()
 def testTebibyte(self):
     """One tebibyte (2**40 bytes) renders as '1.0995 T' at 4 digits."""
     tib = 1 << 40
     self.assertEquals(formatting.formatStorage(tib, 4), "1.0995 T")
 def setFeederClientBytesReadTotal(self, state, value):
     """Show total bytes read for a feeder client in its label."""
     # Conditional expression instead of the fragile `and/or` idiom;
     # equivalent here since the formatted string is never empty for a
     # truthy value.
     txt = (formatting.formatStorage(value) + _("Byte")) if value else ""
     self.labels["bytes-read-total"].set_text(txt)
 def testExbibyte(self):
     """One exbibyte (2**60 bytes) renders as '1.1529 E' at 4 digits."""
     eib = 1 << 60
     self.assertEquals(formatting.formatStorage(eib, 4), "1.1529 E")
Exemple #49
0
 def setFeederClientBytesReadCurrent(self, state, value):
     """Show current bytes read for a feeder client and refresh the timers."""
     # Conditional expression instead of the fragile `and/or` idiom;
     # equivalent here since the formatted string is never empty for a
     # truthy value.
     txt = (formatting.formatStorage(value) + _('Byte')) if value else ''
     self.labels['bytes-read-current'].set_text(txt)
     self.updateConnectionTime()
     self.updateDisconnectionTime()