Code Example #1
class _BatchWriter(object):
    #: Truncate overly big items to that many bytes for the error message.
    ERRMSG_DATA_TRUNCATION_LEN = 1024

    def __init__(self,
                 url,
                 start,
                 auth,
                 size,
                 interval,
                 qsize,
                 maxitemsize,
                 content_encoding,
                 uploader,
                 callback=None):
        self.url = url
        self.offset = start
        self._nextid = count(start)
        self.auth = auth
        self.size = size
        self.interval = interval
        self.maxitemsize = maxitemsize
        self.content_encoding = content_encoding
        self.checkpoint = time.time()
        self.itemsq = Queue(size * 2 if qsize is None else qsize)
        self.closed = False
        self.flushme = False
        self.uploader = uploader
        self.callback = callback

    def write(self, item):
        assert not self.closed, 'attempting writes to a closed writer'
        data = jsonencode(item)
        if len(data) > self.maxitemsize:
            truncated_data = data[:self.ERRMSG_DATA_TRUNCATION_LEN] + "..."
            raise ValueTooLarge(
                'Value exceeds max encoded size of {}: {!r}'.format(
                    sizeof_fmt(self.maxitemsize), truncated_data))

        self.itemsq.put(data)
        if self.itemsq.full():
            self.uploader.interrupt()
        return next(self._nextid)

    def flush(self):
        self.flushme = True
        self._waitforq()
        self.flushme = False

    def close(self, block=True):
        self.closed = True
        if block:
            self._waitforq()

    def _waitforq(self):
        self.uploader.interrupt()
        self.itemsq.join()

    def __str__(self):
        return self.url
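The class above relies on module-level helpers (jsonencode, sizeof_fmt, ValueTooLarge) and imports that the excerpt does not show. Below is a minimal sketch with stand-in helpers and a stub uploader, purely for illustration; none of these stand-ins are the library's real implementations, and the URL and values are made up. Placed in the same module as the class, it drives a single write:

# Hedged stand-ins for the names _BatchWriter expects at module level.
import json
import time
from itertools import count
from queue import Queue  # the original project may use a Python 2/3 compatible queue import


class ValueTooLarge(ValueError):
    """Raised when a serialized item exceeds maxitemsize."""


def jsonencode(obj):
    return json.dumps(obj)


def sizeof_fmt(num):
    # Simplified human-readable size stand-in, e.g. 1048576 -> '1.0M'
    for unit in ('B', 'K', 'M'):
        if num < 1024.0:
            return '%.1f%s' % (num, unit)
        num /= 1024.0
    return '%.1fG' % num


class _StubUploader(object):
    def interrupt(self):
        pass  # the real uploader wakes up and posts the queued batch


writer = _BatchWriter(url='https://storage.example/items', start=0, auth=None,
                      size=100, interval=15, qsize=None, maxitemsize=1024 * 1024,
                      content_encoding='identity', uploader=_StubUploader())
print(writer.write({'title': 'example item'}))  # -> 0, the id assigned to the item
print(writer.itemsq.get())                      # the JSON-encoded payload awaiting upload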
Code Example #2
class _BatchWriter(object):
    #: Truncate overly big items to that many bytes for the error message.
    ERRMSG_DATA_TRUNCATION_LEN = 1024

    def __init__(self, url, start, auth, size, interval, qsize,
                 maxitemsize, content_encoding, uploader, callback=None):
        self.url = url
        self.offset = start
        self._nextid = count(start)
        self.auth = auth
        self.size = size
        self.interval = interval
        self.maxitemsize = maxitemsize
        self.content_encoding = content_encoding
        self.checkpoint = time.time()
        self.itemsq = Queue(size * 2 if qsize is None else qsize)
        self.closed = False
        self.flushme = False
        self.uploader = uploader
        self.callback = callback

    def write(self, item):
        assert not self.closed, 'attempting writes to a closed writer'
        data = jsonencode(item)
        if len(data) > self.maxitemsize:
            truncated_data = data[:self.ERRMSG_DATA_TRUNCATION_LEN] + "..."
            raise ValueTooLarge(
                'Value exceeds max encoded size of {} bytes: {!r}'
                .format(self.maxitemsize, truncated_data))

        self.itemsq.put(data)
        if self.itemsq.full():
            self.uploader.interrupt()
        return next(self._nextid)

    def flush(self):
        self.flushme = True
        self._waitforq()
        self.flushme = False

    def close(self, block=True):
        self.closed = True
        if block:
            self._waitforq()

    def _waitforq(self):
        self.uploader.interrupt()
        self.itemsq.join()

    def __str__(self):
        return self.url
Code Example #3
def run_multiple_commands_redirect_stdout(
        multiple_args_dict,
        print_commands=True,
        process_limit=-1,
        polling_freq=0.5,
        **kwargs):
    """
    Run multiple shell commands in parallel and write each command's
    stdout to the file associated with it.

    Parameters
    ----------
    multiple_args_dict : dict
        A dictionary whose keys are files and values are args list.
        Run each args list as a subprocess and write stdout to the
        corresponding file.

    print_commands : bool
        Print shell commands before running them.

    process_limit : int
        Limit the number of concurrent processes to this number. 0
        if there is no limit, -1 to use the number of available processors.

    polling_freq : float
        Number of seconds between checks for finished processes, if
        there is a process limit.
    """
    assert len(multiple_args_dict) > 0
    assert all(len(args) > 0 for args in multiple_args_dict.values())
    assert all(hasattr(f, 'name') for f in multiple_args_dict.keys())
    if process_limit < 0:
        logger.debug("Using %d processes" % cpu_count())
        process_limit = cpu_count()

    start_time = time.time()
    processes = Queue(maxsize=process_limit)

    def add_to_queue(process):
        process.start()
        if print_commands:
            handler = logging.FileHandler(process.redirect_stdout_file.name)
            handler.setLevel(logging.DEBUG)
            logger.addHandler(handler)
            logger.debug(" ".join(process.args))
            logger.removeHandler(handler)
        processes.put(process)

    for f, args in multiple_args_dict.items():
        p = AsyncProcess(
                args,
                redirect_stdout_file=f,
                **kwargs)
        if not processes.full():
            add_to_queue(p)
        else:
            while processes.full():
                # Are there any done processes?
                to_remove = []
                for possibly_done in processes.queue:
                    if possibly_done.poll() is not None:
                        possibly_done.wait()
                        to_remove.append(possibly_done)
                # Remove them from the queue and stop checking
                if to_remove:
                    for process_to_remove in to_remove:
                        processes.queue.remove(process_to_remove)
                    break
                # Check again after polling_freq seconds if none were done
                time.sleep(polling_freq)
            add_to_queue(p)

    # Wait for all the rest of the processes
    while not processes.empty():
        processes.get().wait()

    elapsed_time = time.time() - start_time
    logger.info(
        "Ran %d commands in %0.4f seconds",
        len(multiple_args_dict),
        elapsed_time)
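The function above assumes module-level imports (time, a logger, cpu_count, Queue) and an AsyncProcess wrapper exposing args, redirect_stdout_file, start(), poll() and wait(). The following is a rough stand-in built on subprocess plus a small call, only to illustrate that interface; it is not mhctools' actual AsyncProcess, and the file names are made up:

# Assumed module-level imports for the snippet above.
import sys
import time
import logging
import subprocess
from multiprocessing import cpu_count
from queue import Queue

logger = logging.getLogger(__name__)


class AsyncProcess(object):
    # Minimal stand-in: only the interface the snippet relies on.
    def __init__(self, args, redirect_stdout_file, **kwargs):
        self.args = args
        self.redirect_stdout_file = redirect_stdout_file
        self._kwargs = kwargs
        self._process = None

    def start(self):
        self._process = subprocess.Popen(
            self.args, stdout=self.redirect_stdout_file, **self._kwargs)

    def poll(self):
        return self._process.poll()

    def wait(self):
        return self._process.wait()


# Run two commands, each redirected to its own file, at most two at a time.
commands = {
    open('out_one.txt', 'w'): [sys.executable, '-c', 'print("one")'],
    open('out_two.txt', 'w'): [sys.executable, '-c', 'print("two")'],
}
run_multiple_commands_redirect_stdout(commands, print_commands=False, process_limit=2)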
Code Example #4
File: process_helpers.py, Project: caperna/mhctools
def run_multiple_commands_redirect_stdout(
        multiple_args_dict,
        print_commands=True,
        process_limit=0,
        polling_freq=1,
        **kwargs):
    """
    Run multiple shell commands in parallel and write each command's
    stdout to the file associated with it.

    Parameters
    ----------
    multiple_args_dict : dict
        A dictionary whose keys are files and values are args list.
        Run each args list as a subprocess and write stdout to the
        corresponding file.

    print_commands : bool
        Print shell commands before running them.

    process_limit : int
        Limit the number of concurrent processes to this number. 0
        if there is no limit.

    polling_freq : int
        Number of seconds between checks for finished processes, if
        there is a process limit.
    """
    assert len(multiple_args_dict) > 0
    assert all(len(args) > 0 for args in multiple_args_dict.values())
    assert all(hasattr(f, 'name') for f in multiple_args_dict.keys())
    start_time = time.time()
    processes = Queue(maxsize=process_limit)

    def add_to_queue(process):
        if print_commands:
            print(" ".join(process.args), ">",
                  process.redirect_stdout_file.name)
        processes.put(process)

    for f, args in multiple_args_dict.items():
        p = AsyncProcess(
            args,
            redirect_stdout_file=f,
            **kwargs)
        if not processes.full():
            add_to_queue(p)
        else:
            while processes.full():
                # Are there any done processes?
                to_remove = []
                for possibly_done in processes.queue:
                    if possibly_done.poll() is not None:
                        possibly_done.wait()
                        to_remove.append(possibly_done)
                # Remove them from the queue and stop checking
                if to_remove:
                    for process_to_remove in to_remove:
                        processes.queue.remove(process_to_remove)
                    break
                # Check again after polling_freq seconds if none were done
                time.sleep(polling_freq)
            add_to_queue(p)

    # Wait for all the rest of the processes
    while not processes.empty():
        processes.get().wait()

    elapsed_time = time.time() - start_time
    logging.info(
        "Ran %d commands in %0.4f seconds",
        len(multiple_args_dict),
        elapsed_time)
Code Example #5
class FifoReadout(object):

    def __init__(self, chip, readout_interval, moving_average_time_period):
        self.logger = logging.getLogger(self.__class__.__name__)
        self.logger.setLevel(loglevel)

        self.chip = chip
        self.callback = None
        self.errback = None
        self.readout_thread = None
        self.worker_thread = None
        self.watchdog_thread = None
        self.fill_buffer = False
        self.readout_interval = readout_interval
        self._moving_average_time_period = moving_average_time_period
        self._data_deque = deque()
        self._data_buffer = deque()
        self._words_per_read = deque(maxlen=int(self._moving_average_time_period / self.readout_interval))
        self._result = Queue(maxsize=1)
        self._calculate = Event()
        self.stop_readout = Event()
        self.force_stop = Event()
        self.timestamp = None
        self.update_timestamp()
        self._is_running = False
        self.reset_rx()
        self.reset_sram_fifo()
        self._record_count = 0

    @property
    def is_running(self):
        return self._is_running

    @property
    def is_alive(self):
        if self.worker_thread:
            return self.worker_thread.is_alive()
        else:
            return False

    @property
    def data(self):
        if self.fill_buffer:
            return self._data_buffer
        else:
            self.logger.warning('Data requested but software data buffer not active')

    def data_words_per_second(self):
        if self._result.full():
            self._result.get()
        self._calculate.set()
        try:
            result = self._result.get(timeout=2 * self.readout_interval)
        except Empty:
            self._calculate.clear()
            return None
        return result / float(self._moving_average_time_period)

    def start(self, callback=None, errback=None, reset_rx=False, reset_sram_fifo=False, reset_errors=True, clear_buffer=False, fill_buffer=False, no_data_timeout=None):
        if self._is_running:
            raise RuntimeError('Readout already running: use stop() before start()')

        self._is_running = True
        self.logger.debug('Starting FIFO readout...')
        self.callback = callback
        self.errback = errback
        self.fill_buffer = fill_buffer
        # self._record_count = 0
        if reset_rx:
            self.reset_rx()
        if reset_sram_fifo:
            self.reset_sram_fifo()
        if reset_errors:
            self.rx_error_reset()
        else:
            fifo_size = self.chip['FIFO']['FIFO_SIZE']
            if fifo_size != 0:
                self.logger.warning('FIFO not empty when starting FIFO readout: size = %i', fifo_size)
        self._words_per_read.clear()
        if clear_buffer:
            self._data_deque.clear()
            self._data_buffer.clear()
        self.stop_readout.clear()
        self.force_stop.clear()
        if self.errback:
            self.watchdog_thread = Thread(target=self.watchdog, name='WatchdogThread')
            self.watchdog_thread.daemon = True
            self.watchdog_thread.start()
        if self.callback:
            self.worker_thread = Thread(target=self.worker, name='WorkerThread')
            self.worker_thread.daemon = True
            self.worker_thread.start()
        self.readout_thread = Thread(target=self.readout, name='ReadoutThread', kwargs={'no_data_timeout': no_data_timeout})
        self.readout_thread.daemon = True
        self.readout_thread.start()

    def stop(self, timeout=10.0):
        if not self._is_running:
            raise RuntimeError('Readout not running: use start() before stop()')
        self._is_running = False
        self.stop_readout.set()
        sleep(0.1)
        try:
            self.readout_thread.join(timeout=timeout)
            if self.readout_thread.is_alive():
                if timeout:
                    raise StopTimeout('FIFO stop timeout after %0.1f second(s)' % timeout)
                else:
                    self.logger.warning('FIFO stop timeout')
        except StopTimeout as e:
            self.force_stop.set()
            if self.errback:
                self.errback(sys.exc_info())
            else:
                self.logger.error(e)
        if self.readout_thread.is_alive():
            self.readout_thread.join()
        if self.errback:
            self.watchdog_thread.join()
        if self.callback:
            self.worker_thread.join()
        self.callback = None
        self.errback = None
        self.logger.debug('Stopped FIFO readout')

    def print_readout_status(self):
        sync_status = self.get_rx_sync_status()
        en_status = self.get_rx_en_status()
        discard_count = self.get_rx_fifo_discard_count()
        decode_error_count = self.get_rx_decode_error_count()

        if not any(sync_status) or any(discard_count) or any(decode_error_count):
            self.logger.warning('RX errors detected')

        self.logger.info('Received words:              %d', self._record_count)
        self.logger.info('Data queue size:             %d', len(self._data_deque))
        self.logger.info('FIFO size:                   %d', self.chip['FIFO']['FIFO_SIZE'])
        self.logger.info('Channel:                     %s', " | ".join([channel.name.rjust(3) for channel in self.chip.get_modules('tpx3_rx')]))
        self.logger.info('RX sync:                     %s', " | ".join(["YES".rjust(3) if status is True else "NO".rjust(3) for status in sync_status]))
        self.logger.info('RX enable:                   %s', " | ".join(["YES".rjust(3) if status is True else "NO".rjust(3) for status in en_status]))
        self.logger.info('RX FIFO discard counter:     %s', " | ".join([repr(count).rjust(3) for count in discard_count]))
        self.logger.info('RX decode errors:            %s', " | ".join([repr(count).rjust(3) for count in decode_error_count]))

    def readout(self, no_data_timeout=None):
        '''
            Readout thread continuously reading FIFO. Uses read_data() and appends data to self._data_deque (collections.deque).
        '''
        self.logger.debug('Starting %s', self.readout_thread.name)
        curr_time = self.get_float_time()
        time_wait = 0.0
        while not self.force_stop.wait(time_wait if time_wait >= 0.0 else 0.0):
            try:
                time_read = time()
                if no_data_timeout and curr_time + no_data_timeout < self.get_float_time():
                    raise NoDataTimeout('Received no data for %0.1f second(s)' % no_data_timeout)

                # TODO: maybe not best solution?
                dlist = []
                while time() - time_read < self.readout_interval:
                    dlist.append(self.read_data())
                data = np.hstack(dlist)

                self._record_count += len(data)
            except Exception:
                no_data_timeout = None  # raise exception only once
                if self.errback:
                    self.errback(sys.exc_info())
                else:
                    raise
                if self.stop_readout.is_set():
                    break
            else:
                n_words = data.shape[0]
                last_time, curr_time = self.update_timestamp()
                discard_error = int(np.sum(self.get_rx_fifo_discard_count(), dtype=np.uint32))
                decode_error = int(np.sum(self.get_rx_decode_error_count(), dtype=np.uint32))
                if self.callback:
                    self._data_deque.append((data, last_time, curr_time, discard_error, decode_error))
                if self.fill_buffer:
                    self._data_buffer.append((data, last_time, curr_time, discard_error, decode_error))
                self._words_per_read.append(n_words)
                # FIXME: busy FE prevents scan termination? To be checked
                if n_words == 0 and self.stop_readout.is_set():
                    break
                if discard_error > 0 or decode_error > 0:
                    self.logger.warning('There were {} discard errors and {} decode errors - Resetting error counters'.format(discard_error, decode_error))
                    self.rx_error_reset()
            finally:
                time_wait = self.readout_interval - (time() - time_read)
            if self._calculate.is_set():
                self._calculate.clear()
                self._result.put(sum(self._words_per_read))
        if self.callback:
            self._data_deque.append(None)  # last item, will stop worker
        self.logger.debug('Stopped %s', self.readout_thread.name)

    def worker(self):
        '''
            Worker thread continuously calling callback function when data is available.
        '''
        self.logger.debug('Starting %s', self.worker_thread.name)
        while True:
            try:
                data = self._data_deque.popleft()
            except IndexError:
                self.stop_readout.wait(self.readout_interval)  # sleep a little bit, reducing CPU usage
            else:
                if data is None:  # if None then exit
                    break
                else:
                    try:
                        self.callback(data)
                    except Exception:
                        self.errback(sys.exc_info())

        self.logger.debug('Stopped %s', self.worker_thread.name)

    def watchdog(self):
        self.logger.debug('Starting %s', self.watchdog_thread.name)
        while True:
            try:
                if not any(self.get_rx_sync_status()):
                    raise RxSyncError('No RX sync')
                cnt = self.get_rx_fifo_discard_count()
                if any(cnt):
                    raise FifoError('RX FIFO discard error(s) detected ', cnt)
            except Exception:
                self.errback(sys.exc_info())
            if self.stop_readout.wait(self.readout_interval * 10):
                break
        self.logger.debug('Stopped %s', self.watchdog_thread.name)

    def read_data(self):
        '''
            Read FIFO and return data array
            Can be used without threading.

            Returns
            ----------
            data : list
                    A list of FIFO data words.
        '''
        return self.chip['FIFO'].get_data()

    def update_timestamp(self):
        curr_time = self.get_float_time()
        last_time = self.timestamp
        self.timestamp = curr_time
        return last_time, curr_time

    def read_status(self):
        raise NotImplementedError()

    def reset_sram_fifo(self):
        fifo_size = self.chip['FIFO']['FIFO_SIZE']
        self.logger.debug('Resetting FIFO: size = %i', fifo_size)
        self.update_timestamp()
        self.chip['FIFO']['RESET']
        sleep(0.01)  # sleep here for a while
        fifo_size = self.chip['FIFO']['FIFO_SIZE']
        if fifo_size != 0:
            self.logger.warning('FIFO not empty after reset: size = %i', fifo_size)

    def enable_rx(self, enable=True, channels=None):
        self.logger.debug('Enable RX')
        if channels is None:
            for ch in self.chip.get_modules('tpx3_rx'):
                ch.ENABLE = enable
        else:
            for ch in channels:
                self.chip[ch].ENABLE = enable

    def reset_rx(self, channels=None):
        self.logger.debug('Resetting RX')
        if channels:
            for channel in channels:
                self.chip[channel].RESET  # attribute access only; result discarded, as in the original list comprehension
        else:
            for channel in self.chip.get_modules('tpx3_rx'):
                channel.RESET
        sleep(0.1)  # sleep here for a while

    def rx_error_reset(self, channels=None):
        self.logger.debug('Resetting RX errors')
        if channels:
            for channel in channels:
                self.chip[channel].rx_error_reset()
        else:
            for channel in self.chip.get_modules('tpx3_rx'):
                channel.rx_error_reset()

    def get_rx_sync_status(self, channels=None):
        if channels:
            return [bool(self.chip[channel].READY) for channel in channels]
        else:
            return [bool(channel.READY) for channel in self.chip.get_modules('tpx3_rx')]

    def get_rx_en_status(self, channels=None):
        if channels:
            return [bool(self.chip[channel].ENABLE) for channel in channels]
        else:
            return [bool(channel.ENABLE) for channel in self.chip.get_modules('tpx3_rx')]

    def get_rx_fifo_discard_count(self, channels=None):
        if channels:
            return [self.chip[channel].LOST_DATA_COUNTER for channel in channels]
        else:
            return [channel.LOST_DATA_COUNTER for channel in self.chip.get_modules('tpx3_rx')]

    def get_rx_decode_error_count(self, channels=None):
        if channels:
            return [self.chip[channel].DECODER_ERROR_COUNTER for channel in channels]
        else:
            return [channel.DECODER_ERROR_COUNTER for channel in self.chip.get_modules('tpx3_rx')]

    def get_float_time(self):
        '''
            Returns the current time as a double precision float (Time64 in pytables), suitable for mapping to and from Python datetime objects.
        '''
        t1 = time()
        t2 = datetime.datetime.fromtimestamp(t1)
        return mktime(t2.timetuple()) + 1e-6 * t2.microsecond
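The excerpt also leaves out the module-level names it depends on (loglevel, the exception classes, and the numpy/threading/queue imports). A hedged reconstruction of what the surrounding module presumably provides, enough for the class to be imported, might be:

# Assumed imports and module-level names for the FifoReadout excerpt above;
# the original project defines its own versions of these.
import sys
import logging
import datetime
import numpy as np
from time import time, sleep, mktime
from threading import Thread, Event
from collections import deque
from queue import Queue, Empty

loglevel = logging.INFO  # assumption; set to whatever level the project uses


class RxSyncError(Exception):
    pass


class FifoError(Exception):
    pass


class NoDataTimeout(Exception):
    pass


class StopTimeout(Exception):
    pass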