Code Example #1
    def run(self):
        args = list(islice(self.reqs, self.requests))
        if self.shuffle:
            random.shuffle(args)
        print("Total requests: %d" % len(args))
        print("Concurrency   : %d" % self.concurrency)

        starttime = time.time()
        q, p = Queue(), Queue()
        for _ in six.moves.range(self.concurrency):
            t = Thread(target=worker, args=(self.host, q, p, self.verbose))
            t.daemon = True
            t.start()
        for a in args:
            q.put(a)
        q.join()

        outputs = []
        for _ in six.moves.range(self.requests):
            outputs.append(p.get())

        elapsed = time.time() - starttime
        print()
        print("Total requests: %d" % len(args))
        print("Concurrency   : %d" % self.concurrency)
        print("Elapsed time  : %.3fs" % elapsed)
        print("Avg time p/req: %.3fs" % (elapsed / len(args)))
        print("Received (per status code or error):")
        for c, n in Counter(outputs).items():
            print("  %s: %d" % (c, n))
Code Example #2
File: __init__.py Project: MahatmaCane/iris
    def lines(self, fuseki_process):
        """
        Provides an iterator generating the encoded string representation
        of each member of this metarelate mapping translation.

        Returns:
            An iterator of strings.

        """
        msg = '\tGenerating phenomenon translation {!r}.'
        print(msg.format(self.mapping_name))
        lines = ['\n%s = {\n' % self.mapping_name]
        # Retrieve encodings for the collection of mapping instances.
        # Retrieval is threaded as it is heavily bound by resource resolution
        # over http.
        # Queue for metarelate mapping instances
        mapenc_queue = Queue()
        for mapping in self.mappings:
            mapenc_queue.put(mapping)
        # deque to contain the results of the jobs processed from the queue
        mapencs = deque()
        # run worker threads
        for i in range(MAXTHREADS):
            MappingEncodeWorker(mapenc_queue, mapencs, fuseki_process).start()
        # block progress until the queue is empty
        mapenc_queue.join()
        # end of threaded retrieval process.

        # now sort the payload
        payload = [mapenc.encoding for mapenc in mapencs]
        payload.sort(key=self._key)
        lines.extend(payload)
        lines.append('    }\n')
        return iter(lines)
Code Example #3
class _BatchWriter(object):
    #: Truncate overly big items to that many bytes for the error message.
    ERRMSG_DATA_TRUNCATION_LEN = 1024

    def __init__(self,
                 url,
                 start,
                 auth,
                 size,
                 interval,
                 qsize,
                 maxitemsize,
                 content_encoding,
                 uploader,
                 callback=None):
        self.url = url
        self.offset = start
        self._nextid = count(start)
        self.auth = auth
        self.size = size
        self.interval = interval
        self.maxitemsize = maxitemsize
        self.content_encoding = content_encoding
        self.checkpoint = time.time()
        self.itemsq = Queue(size * 2 if qsize is None else qsize)
        self.closed = False
        self.flushme = False
        self.uploader = uploader
        self.callback = callback

    def write(self, item):
        assert not self.closed, 'attempting writes to a closed writer'
        data = jsonencode(item)
        if len(data) > self.maxitemsize:
            truncated_data = data[:self.ERRMSG_DATA_TRUNCATION_LEN] + "..."
            raise ValueTooLarge(
                'Value exceeds max encoded size of {}: {!r}'.format(
                    sizeof_fmt(self.maxitemsize), truncated_data))

        self.itemsq.put(data)
        if self.itemsq.full():
            self.uploader.interrupt()
        return next(self._nextid)

    def flush(self):
        self.flushme = True
        self._waitforq()
        self.flushme = False

    def close(self, block=True):
        self.closed = True
        if block:
            self._waitforq()

    def _waitforq(self):
        self.uploader.interrupt()
        self.itemsq.join()

    def __str__(self):
        return self.url
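
The `uploader` that drains `itemsq` is not part of this example, so the call chain that makes `flush()` and `close()` return is easy to miss: some background consumer has to call `task_done()` once per item it takes off the queue, or `itemsq.join()` inside `_waitforq()` blocks forever. A minimal sketch of such a consumer loop (Python 3 `queue` module assumed; `drain_loop` and `upload_batch` are hypothetical names, not part of the original code):

from queue import Empty

def drain_loop(itemsq, upload_batch, batch_size=100):
    # Hypothetical consumer: collect up to batch_size encoded items,
    # hand them to upload_batch(), then acknowledge each one so that
    # itemsq.join() in _BatchWriter._waitforq() can return.
    while True:
        batch = [itemsq.get()]            # block until at least one item arrives
        try:
            while len(batch) < batch_size:
                batch.append(itemsq.get_nowait())
        except Empty:
            pass
        upload_batch(batch)               # send the JSON-encoded items somewhere
        for _ in batch:
            itemsq.task_done()            # exactly one task_done() per get()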
Code Example #4
File: stress.py Project: dvska/splash
    def run(self):
        args = list(islice(self.reqs, self.requests))
        if self.shuffle:
            random.shuffle(args)
        print("Total requests: %d" % len(args))
        print("Concurrency   : %d" % self.concurrency)

        starttime = time.time()
        q, p = Queue(), Queue()
        for _ in six.moves.range(self.concurrency):
            t = Thread(target=worker, args=(self.host, q, p, self.verbose))
            t.daemon = True
            t.start()
        for a in args:
            q.put(a)
        q.join()

        outputs = []
        for _ in six.moves.range(self.requests):
            outputs.append(p.get())

        elapsed = time.time() - starttime
        print()
        print("Total requests: %d" % len(args))
        print("Concurrency   : %d" % self.concurrency)
        print("Elapsed time  : %.3fs" % elapsed)
        print("Avg time p/req: %.3fs" % (elapsed / len(args)))
        print("Received (per status code or error):")
        for c, n in Counter(outputs).items():
            print("  %s: %d" % (c, n))
Code Example #5
class Pool(object):
    class Error(Exception):
        pass

    def __init__(self, threads, host, port, ssl, user, password):
        self._threads = []
        self._queue = Queue(maxsize=1000)
        count = 0
        while len(self._threads) < threads and count < 3 * threads:
            try:
                count += 1
                w = Downloader(self._queue, host, port, ssl, user, password)
                w.start()
                self._threads.append(w)
            except SOFT_ERRORS as e:
                log.warn('Cannot create downloader thread: %s', e)

        if len(self._threads) != threads:
            log.error('Cannot create enough workers')
            raise Pool.Error('Cannot create enough workers')

    def wait_finish(self):
        self._queue.join()

    def stop(self):
        for t in self._threads:
            t.stop()

    def download(self, **kwargs):
        kwargs['retry'] = 0
        self._queue.put(kwargs)
Code Example #6
File: __init__.py Project: juanmcloaiza/iris_fork
    def lines(self, fuseki_process):
        """
        Provides an iterator generating the encoded string representation
        of each member of this metarelate mapping translation.

        Returns:
            An iterator of strings.

        """
        msg = '\tGenerating phenomenon translation {!r}.'
        print(msg.format(self.mapping_name))
        lines = ['\n%s = {\n' % self.mapping_name]
        # Retrieve encodings for the collection of mapping instances.
        # Retrieval is threaded as it is heavily bound by resource resolution
        # over http.
        # Queue for metarelate mapping instances
        mapenc_queue = Queue()
        for mapping in self.mappings:
            mapenc_queue.put(mapping)
        # deque to contain the results of the jobs processed from the queue
        mapencs = deque()
        # run worker threads
        for i in range(MAXTHREADS):
            MappingEncodeWorker(mapenc_queue, mapencs, fuseki_process).start()
        # block progress until the queue is empty
        mapenc_queue.join()
        # end of threaded retrieval process.

        # now sort the payload
        payload = [mapenc.encoding for mapenc in mapencs]
        payload.sort(key=self._key)
        lines.extend(payload)
        lines.append('    }\n')
        return iter(lines)
Code Example #7
class SimpleThreadPool:
    def __init__(self, num_threads=5):
        self._num_threads = num_threads
        self._queue = Queue(2000)
        self._lock = Lock()
        self._active = False
        self._workers = list()
        self._finished = False

    def add_task(self, func, *args, **kwargs):
        if not self._active:
            with self._lock:
                if not self._active:
                    self._active = True
                    for i in range(self._num_threads):
                        w = WorkerThread(self._queue)
                        self._workers.append(w)
                        w.start()

        self._queue.put((func, args, kwargs))

    def release(self):
        while self._queue.empty() is False:
            time.sleep(1)

    def wait_completion(self):
        self._queue.join()
        self._finished = True

    def get_result(self):
        assert self._finished
        detail = [worker.get_result() for worker in self._workers]
        succ_all = all([tp[1] == 0 for tp in detail])
        return {'success_all': succ_all, 'detail': detail}
Code Example #8
File: pool.py Project: izderadicka/imap_detach
class Pool(object):
    class Error(Exception):
        pass
    
    def __init__(self, threads, host, port, ssl, user, password):
        self._threads = []
        self._queue = Queue(maxsize=1000)
        count = 0
        while len(self._threads) < threads and count < 3 * threads:
            try:
                count += 1
                w = Downloader(self._queue, host, port, ssl, user, password)
                w.start()
                self._threads.append(w)
            except SOFT_ERRORS as e:
                log.warn('Cannot create downloader thread: %s', e)

        if len(self._threads) != threads:
            log.error('Cannot create enough workers')
            raise Pool.Error('Cannot create enough workers')

    def wait_finish(self):
        self._queue.join()

    def stop(self):
        for t in self._threads:
            t.stop()

    def download(self, **kwargs):
        kwargs['retry'] = 0
        self._queue.put(kwargs)
Code Example #9
File: main.py Project: developmentseed/sentinel-s3
def daily_metadata(year, month, day, dst_folder, writers=[file_writer], geometry_check=None,
                   num_worker_threads=1):
    """ Extra metadata for all products in a specific date """

    threaded = False

    counter = {
        'products': 0,
        'saved_tiles': 0,
        'skipped_tiles': 0,
        'skipped_tiles_paths': []
    }

    if num_worker_threads > 1:
        threaded = True
        queue = Queue()

    # create folders
    year_dir = os.path.join(dst_folder, str(year))
    month_dir = os.path.join(year_dir, str(month))
    day_dir = os.path.join(month_dir, str(day))

    product_list = get_products_metadata_path(year, month, day)

    logger.info('There are %s products in %s-%s-%s' % (len(list(iterkeys(product_list))),
                                                       year, month, day))

    for name, product in iteritems(product_list):
        product_dir = os.path.join(day_dir, name)

        if threaded:
            queue.put([product, product_dir, counter, writers, geometry_check])
        else:
            counter = product_metadata(product, product_dir, counter, writers, geometry_check)

    if threaded:
        def worker():
            while not queue.empty():
                args = queue.get()
                try:
                    product_metadata(*args)
                except Exception:
                    exc = sys.exc_info()
                    logger.error('%s tile skipped due to error: %s' % (threading.current_thread().name,
                                                                       exc[1].__str__()))
                    args[2]['skipped_tiles'] += 1
                queue.task_done()

        threads = []
        for i in range(num_worker_threads):
            t = threading.Thread(target=worker)
            t.start()
            threads.append(t)

        queue.join()

    return counter
Code Example #10
class ThreadPool:
    def __init__(self, context={}, size=2):
        self.tasks = Queue(size)
        for num in range(size):
            Worker(self.tasks, context)

    def add(self, func, *args, **kargs):
        self.tasks.put((func, args, kargs))

    def wait(self):
        self.tasks.join()
Code Example #11
class Copier(object):
    def __init__(self, num_workers=4):
        self.num_workers = num_workers
        self.queue = None
        self.workers = []
        self.errors = []
        self.running = False

    def start(self):
        if self.num_workers:
            self.queue = Queue(maxsize=self.num_workers)
            self.workers = [_Worker(self) for _ in range(self.num_workers)]
            for worker in self.workers:
                worker.start()
        self.running = True

    def stop(self):
        if self.running and self.num_workers:
            for worker in self.workers:
                self.queue.put(None)
            for worker in self.workers:
                worker.join()
            self.queue.join()
        self.running = False

    def add_error(self, error):
        self.errors.append(error)

    def __enter__(self):
        self.start()
        return self

    def __exit__(
        self,
        exc_type,  # type: Optional[Type[BaseException]]
        exc_value,  # type: Optional[BaseException]
        traceback  # type: Optional[TracebackType]
    ):
        self.stop()

    def copy(self, src_fs, src_path, dst_fs, dst_path):
        """Copy a file from on fs to another."""
        src_file = src_fs.openbin(src_path, 'r')
        try:
            dst_file = dst_fs.openbin(dst_path, 'w')
        except Exception:
            # If dst file fails to open, explicitly close src_file
            src_file.close()
            raise
        task = _CopyTask(src_file, dst_file)
        if self.num_workers:
            self.queue.put(task)
        else:
            task()
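
`_Worker` and `_CopyTask` are not shown above. The interplay in `stop()` (one `None` sentinel per worker, then `worker.join()`, then `queue.join()`) only works if every worker acknowledges the sentinel as well as real tasks. A rough sketch under those assumptions; the real classes in the originating project may differ:

import shutil
from threading import Thread


class _CopyTask(object):
    # Hypothetical task: copy one open binary file to another, then close both.
    def __init__(self, src_file, dst_file):
        self.src_file = src_file
        self.dst_file = dst_file

    def __call__(self):
        try:
            shutil.copyfileobj(self.src_file, self.dst_file)
        finally:
            self.src_file.close()
            self.dst_file.close()


class _Worker(Thread):
    # Hypothetical worker: run tasks until the None sentinel queued by Copier.stop().
    def __init__(self, copier):
        Thread.__init__(self)
        self.daemon = True
        self.copier = copier

    def run(self):
        while True:
            task = self.copier.queue.get()
            try:
                if task is None:          # sentinel from stop(): exit this worker
                    break
                task()
            except Exception as error:
                self.copier.add_error(error)
            finally:
                self.copier.queue.task_done()  # also acknowledges the sentinel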
Code Example #12
class _BatchWriter(object):
    #: Truncate overly big items to that many bytes for the error message.
    ERRMSG_DATA_TRUNCATION_LEN = 1024

    def __init__(self, url, start, auth, size, interval, qsize,
                 maxitemsize, content_encoding, uploader, callback=None):
        self.url = url
        self.offset = start
        self._nextid = count(start)
        self.auth = auth
        self.size = size
        self.interval = interval
        self.maxitemsize = maxitemsize
        self.content_encoding = content_encoding
        self.checkpoint = time.time()
        self.itemsq = Queue(size * 2 if qsize is None else qsize)
        self.closed = False
        self.flushme = False
        self.uploader = uploader
        self.callback = callback

    def write(self, item):
        assert not self.closed, 'attempting writes to a closed writer'
        data = jsonencode(item)
        if len(data) > self.maxitemsize:
            truncated_data = data[:self.ERRMSG_DATA_TRUNCATION_LEN] + "..."
            raise ValueTooLarge(
                'Value exceeds max encoded size of {} bytes: {!r}'
                .format(self.maxitemsize, truncated_data))

        self.itemsq.put(data)
        if self.itemsq.full():
            self.uploader.interrupt()
        return next(self._nextid)

    def flush(self):
        self.flushme = True
        self._waitforq()
        self.flushme = False

    def close(self, block=True):
        self.closed = True
        if block:
            self._waitforq()

    def _waitforq(self):
        self.uploader.interrupt()
        self.itemsq.join()

    def __str__(self):
        return self.url
Code Example #13
class ThreadPool:
    """Pool of threads consuming tasks from a queue."""
    def __init__(self, num_threads):
        self.tasks = Queue(num_threads)
        for _ in range(num_threads):
            Worker(self.tasks)

    def add_task(self, func, *args, **kargs):
        """Add a task to the queue."""
        self.tasks.put((func, args, kargs))

    def wait_completion(self):
        """Wait for completion of all the tasks in the queue."""
        self.tasks.join()
Code Example #14
File: threadpool.py Project: PeterJCLaw/tools
class ThreadPool:
    """Pool of threads consuming tasks from a queue."""
    def __init__(self, num_threads):
        self.tasks = Queue(num_threads)
        for _ in range(num_threads):
            Worker(self.tasks)

    def add_task(self, func, *args, **kargs):
        """Add a task to the queue."""
        self.tasks.put((func, args, kargs))

    def wait_completion(self):
        """Wait for completion of all the tasks in the queue."""
        self.tasks.join()
Code Example #15
File: utils.py Project: MortalCatalyst/lektor
class WorkerPool(object):

    def __init__(self, num_threads=None):
        if num_threads is None:
            num_threads = multiprocessing.cpu_count()
        self.tasks = Queue(num_threads)
        for _ in range(num_threads):
            Worker(self.tasks)

    def add_task(self, func, *args, **kargs):
        self.tasks.put((func, args, kargs))

    def wait_for_completion(self):
        self.tasks.join()
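
The `Worker` consumed by the last few pool examples is not shown. Below is a minimal sketch of what it typically looks like; the class name and the `(func, args, kwargs)` tuple format come from the examples above, everything else is an assumption. The essential detail is that `task_done()` is called for every `get()`, in a `finally` block, so `tasks.join()` returns even when an individual task raises:

import traceback
from threading import Thread


class Worker(Thread):
    # Daemon thread that executes (func, args, kwargs) tasks from a shared queue.
    def __init__(self, tasks):
        Thread.__init__(self)
        self.tasks = tasks
        self.daemon = True
        self.start()              # the pools above construct workers without calling start()

    def run(self):
        while True:
            func, args, kwargs = self.tasks.get()
            try:
                func(*args, **kwargs)
            except Exception:
                traceback.print_exc()     # never let one bad task kill the worker
            finally:
                self.tasks.task_done()    # required for Queue.join() to return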
Code Example #16
def fetch_adblock_list():
    queue = Queue()
    for url in FILTER_URLS:
        queue.put(url)

    worker_count = min(len(FILTER_URLS), multiprocessing.cpu_count())
    workers = []
    for _ in range(worker_count):
        worker = Worker(queue)
        worker.start()
        workers.append(worker)

    queue.join()
    hosts_str = '\n'
    for worker in workers:
        hosts_str += worker.result
    return hosts_str
Code Example #17
def main():
    volumes = 900
    num_worker_threads = 25
    task = Queue()
    poems = Queue()
    for i in range(num_worker_threads):
        t = Thread(target=worker, args=(task, poems))
        t.daemon = True
        t.start()
    write_thread = Thread(target=write_poems, args=('./data/poems.txt', poems))
    write_thread.start()
    for js in range(1, volumes + 1):
        task.put(js)
    task.join()
    poems.join()
    poems.put(None)
    write_thread.join()
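
Neither `worker` nor `write_poems` is shown above. The ordering in `main()` (first `task.join()`, then `poems.join()`, then the `None` sentinel, then `write_thread.join()`) only works if the writer calls `task_done()` for every poem it takes off the queue. A hypothetical sketch of the writer under that assumption:

def write_poems(path, poems):
    # Hypothetical writer thread body: drain the poems queue until the None
    # sentinel that main() enqueues after poems.join() has returned.
    with open(path, 'w', encoding='utf-8') as f:
        while True:
            poem = poems.get()
            try:
                if poem is None:      # sentinel: everything is written, stop
                    break
                f.write(poem)
                f.write('\n')
            finally:
                poems.task_done()     # lets poems.join() in main() return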
Code Example #18
class SimpleThreadPool:
    def __init__(self, num_threads=3):
        self._num_threads = num_threads
        self._queue = Queue(2000)
        self._lock = Lock()
        self._active = False
        self._workers = []
        self._finished = False

    def add_task(self, func, *args, **kwargs):
        if not self._active:
            with self._lock:
                if not self._active:
                    self._workers = []
                    self._active = True
                    for i in range(self._num_threads):
                        w = WorkerThread(self._queue)
                        self._workers.append(w)
                        w.start()

        self._queue.put((func, args, kwargs))

    def release(self):
        while self._queue.empty() is False:
            time.sleep(1)

    def wait_completion(self):
        self._queue.join()
        self._finished = True
        # All tasks are done, so push sentinels that make every worker thread exit instead of hanging
        for i in range(self._num_threads):
            self._queue.put((None, None, None))

        self._active = False

    def complete(self):
        self._finished = True

    def get_result(self):
        assert self._finished
        detail = [worker.get_result() for worker in self._workers]
        succ_num = sum([tp[0] for tp in detail])
        fail_num = sum([tp[1] for tp in detail])
        return {'success_num': succ_num, 'fail_num': fail_num}
Code Example #19
File: test_image_json.py Project: bjlittle/iris
    def test_resolve(self):
        repo_fname = os.path.join(os.path.dirname(__file__), "results", "imagerepo.json")
        with open(repo_fname, "rb") as fi:
            repo = json.load(codecs.getreader("utf-8")(fi))
        uris = list(chain.from_iterable(six.itervalues(repo)))
        uri_list = deque()
        exceptions = deque()
        uri_queue = Queue()
        for uri in uris:
            if uri.startswith("https://scitools.github.io"):
                uri_queue.put(uri)
            else:
                msg = "{} is not a valid resource.".format(uri)
                exceptions.append(ValueError(msg))

        for i in range(MAXTHREADS):
            _ResolveWorkerThread(uri_queue, uri_list, exceptions).start()
        uri_queue.join()
        self.assertEqual(deque(), exceptions)
Code Example #20
class SimpleThreadPool:

    def __init__(self, num_threads=5, num_queue=0):
        self._num_threads = num_threads
        self._queue = Queue(num_queue)
        self._lock = Lock()
        self._active = False
        self._workers = list()
        self._finished = False

    def add_task(self, func, *args, **kwargs):
        if not self._active:
            with self._lock:
                if not self._active:
                    self._workers = []
                    self._active = True

                    for i in range(self._num_threads):
                        w = WorkerThread(self._queue)
                        self._workers.append(w)
                        w.start()

        self._queue.put((func, args, kwargs))

    def wait_completion(self):
        self._queue.join()
        self._finished = True
        # All tasks are done, so push sentinels that make every worker thread exit instead of hanging
        for i in range(self._num_threads):
            self._queue.put((None, None, None))

        self._active = False

    def get_result(self):
        assert self._finished
        detail = [worker.get_result() for worker in self._workers]
        succ_all = all([tp[1] == 0 for tp in detail])
        return {'success_all': succ_all, 'detail': detail}
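
`WorkerThread` is not shown in the SimpleThreadPool examples. Here is a sketch consistent with how they use it (a `(None, None, None)` sentinel to stop, and `get_result()` returning a `(successes, failures)` tuple); the actual class in the originating SDK may differ:

from threading import Thread


class WorkerThread(Thread):
    # Hypothetical worker: executes (func, args, kwargs) tasks and keeps
    # per-thread success/failure counts for SimpleThreadPool.get_result().
    def __init__(self, queue):
        Thread.__init__(self)
        self.daemon = True
        self._queue = queue
        self._succ = 0
        self._fail = 0

    def run(self):
        while True:
            func, args, kwargs = self._queue.get()
            try:
                if func is None:          # (None, None, None) sentinel: exit
                    break
                func(*args, **kwargs)
                self._succ += 1
            except Exception:
                self._fail += 1
            finally:
                self._queue.task_done()

    def get_result(self):
        return self._succ, self._fail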
Code Example #21
class WebRunner:
    resp_queue = None

    def __init__(self):
        self.resp_queue = Queue()

    def request(self, index, url, **kwargs):
        kwargs = flatten_kwargs(index, **kwargs)
        try:
            method = kwargs.pop('method', 'GET')
            self.resp_queue.put(
                (index, requests.request(method=method, url=url, **kwargs)))
        except Exception as e:
            self.resp_queue.put((index, None))
            print('Failed to download %s because %s.' % (url, e))

    def runner(self, **kwargs):
        while True:
            index, url = self.work_queue.get()
            if 'http://' not in url and 'https://' not in url:
                url = 'http://' + url
            self.request(index, url, **kwargs)
            self.work_queue.task_done()

    def run(self, urls, concurrency=4, **kwargs):
        self.work_queue = Queue()
        for url in enumerate(urls):
            self.work_queue.put(url)
        for i in range(concurrency):
            t = Thread(target=self.runner, kwargs=kwargs)
            t.daemon = True
            t.start()
        self.work_queue.join()
        responses = list(self.resp_queue.queue)
        responses = sorted(responses, key=lambda x: x[0])
        return [r[1] for r in responses]
Code Example #22
File: util.py Project: Cue/scales
class GraphiteReporter(threading.Thread):
  """A graphite reporter thread."""

  def __init__(self, host, port, maxQueueSize=10000):
    """Connect to a Graphite server on host:port."""
    threading.Thread.__init__(self)

    self.host, self.port = host, port
    self.sock = None
    self.queue = Queue()
    self.maxQueueSize = maxQueueSize
    self.daemon = True


  def run(self):
    """Run the thread."""
    while True:
      try:
        try:
          name, value, valueType, stamp = self.queue.get()
        except TypeError:
          break
        self.log(name, value, valueType, stamp)
      finally:
        self.queue.task_done()


  def connect(self):
    """Connects to the Graphite server if not already connected."""
    if self.sock is not None:
      return
    backoff = 0.01
    while True:
      try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(5)
        sock.connect((self.host, self.port))
        self.sock = sock
        return
      except socket.error:
        time.sleep(random.uniform(0, 2.0*backoff))
        backoff = min(backoff*2.0, 5.0)


  def disconnect(self):
    """Disconnect from the Graphite server if connected."""
    if self.sock is not None:
      try:
        self.sock.close()
      except socket.error:
        pass
      finally:
        self.sock = None


  def _sendMsg(self, msg):
    """Send a line to graphite. Retry with exponential backoff."""
    if not self.sock:
      self.connect()
    if not isinstance(msg, binary_type):
      msg = msg.encode("UTF-8")

    backoff = 0.001
    while True:
      try:
        self.sock.sendall(msg)
        break
      except socket.error:
        log.warning('Graphite connection error', exc_info = True)
        self.disconnect()
        time.sleep(random.uniform(0, 2.0*backoff))
        backoff = min(backoff*2.0, 5.0)
        self.connect()


  def _sanitizeName(self, name):
    """Sanitize a metric name."""
    return name.replace(' ', '-')


  def log(self, name, value, valueType=None, stamp=None):
    """Log a named numeric value. The value type may be 'value',
    'count', or None."""
    if type(value) == float:
      form = "%s%s %2.2f %d\n"
    else:
      form = "%s%s %s %d\n"

    if valueType is not None and len(valueType) > 0 and valueType[0] != '.':
      valueType = '.' + valueType

    if not stamp:
      stamp = time.time()

    self._sendMsg(form % (self._sanitizeName(name), valueType or '', value, stamp))


  def enqueue(self, name, value, valueType=None, stamp=None):
    """Enqueue a call to log."""
    # If queue is too large, refuse to log.
    if self.maxQueueSize and self.queue.qsize() > self.maxQueueSize:
      return
    # Stick arguments into the queue
    self.queue.put((name, value, valueType, stamp))


  def flush(self):
    """Block until all stats have been sent to Graphite."""
    self.queue.join()


  def shutdown(self):
    """Shut down the background thread."""
    self.queue.put(None)
    self.flush()
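
A short usage sketch for the reporter above (the host name and metric names are placeholders). `shutdown()` works because `run()` tries to unpack each queue entry into four values: the `None` sentinel raises `TypeError`, the loop breaks, and the `finally` clause still calls `task_done()`, so the `queue.join()` inside `flush()` can return:

# Hypothetical usage; 'graphite.example.com' is a placeholder host.
reporter = GraphiteReporter('graphite.example.com', 2003)
reporter.start()                                  # daemon thread drains the queue

reporter.enqueue('web.requests', 42, valueType='count')
reporter.enqueue('web.latency', 0.123)            # stamp defaults to time.time()

reporter.flush()                                  # block until everything queued so far is sent
reporter.shutdown()                               # queue the None sentinel, then flush()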
Code Example #23
File: execute.py Project: ashvark/galaxy
def execute( trans, tool, param_combinations, history, rerun_remap_job_id=None, collection_info=None, workflow_invocation_uuid=None ):
    """
    Execute a tool and return object containing summary (output data, number of
    failures, etc...).
    """
    all_jobs_timer = ExecutionTimer()
    execution_tracker = ToolExecutionTracker( tool, param_combinations, collection_info )
    app = trans.app
    execution_cache = ToolExecutionCache(trans)

    def execute_single_job(params):
        job_timer = ExecutionTimer()
        if workflow_invocation_uuid:
            params[ '__workflow_invocation_uuid__' ] = workflow_invocation_uuid
        elif '__workflow_invocation_uuid__' in params:
            # Only workflow invocation code gets to set this, ignore user supplied
            # values or rerun parameters.
            del params[ '__workflow_invocation_uuid__' ]
        job, result = tool.handle_single_execution( trans, rerun_remap_job_id, params, history, collection_info, execution_cache )
        if job:
            message = EXECUTION_SUCCESS_MESSAGE % (tool.id, job.id, job_timer)
            log.debug(message)
            execution_tracker.record_success( job, result )
        else:
            execution_tracker.record_error( result )

    config = app.config
    burst_at = getattr( config, 'tool_submission_burst_at', 10 )
    burst_threads = getattr( config, 'tool_submission_burst_threads', 1 )

    tool_action = tool.tool_action
    if hasattr( tool_action, "check_inputs_ready" ):
        for params in execution_tracker.param_combinations:
            # This will throw an exception if the tool is not ready.
            tool_action.check_inputs_ready(
                tool,
                trans,
                params,
                history
            )

    job_count = len(execution_tracker.param_combinations)
    if job_count < burst_at or burst_threads < 2:
        for params in execution_tracker.param_combinations:
            execute_single_job(params)
    else:
        q = Queue()

        def worker():
            while True:
                params = q.get()
                execute_single_job(params)
                q.task_done()

        for i in range(burst_threads):
            t = Thread(target=worker)
            t.daemon = True
            t.start()

        for params in execution_tracker.param_combinations:
            q.put(params)

        q.join()

    log.debug("Executed %d job(s) for tool %s request: %s" % (job_count, tool.id, all_jobs_timer))
    if collection_info:
        history = history or tool.get_default_history_by_trans( trans )
        if len(param_combinations) == 0:
            template = "Attempting to map over an empty collection, this is not yet implemented. colleciton_info is [%s]"
            message = template % collection_info
            log.warn(message)
            raise Exception(message)
        params = param_combinations[0]
        execution_tracker.create_output_collections( trans, history, params )

    return execution_tracker
Code Example #24
File: execute.py Project: ImmPortDB/immport-galaxy
def execute(trans, tool, mapping_params, history, rerun_remap_job_id=None, collection_info=None, workflow_invocation_uuid=None, invocation_step=None, max_num_jobs=None, job_callback=None, completed_jobs=None, workflow_resource_parameters=None):
    """
    Execute a tool and return object containing summary (output data, number of
    failures, etc...).
    """
    if max_num_jobs:
        assert invocation_step is not None
    if rerun_remap_job_id:
        assert invocation_step is None

    all_jobs_timer = ExecutionTimer()
    if invocation_step is None:
        execution_tracker = ToolExecutionTracker(trans, tool, mapping_params, collection_info)
    else:
        execution_tracker = WorkflowStepExecutionTracker(trans, tool, mapping_params, collection_info, invocation_step, job_callback=job_callback)
    app = trans.app
    execution_cache = ToolExecutionCache(trans)

    def execute_single_job(execution_slice, completed_job):
        job_timer = ExecutionTimer()
        params = execution_slice.param_combination
        if workflow_invocation_uuid:
            params['__workflow_invocation_uuid__'] = workflow_invocation_uuid
        elif '__workflow_invocation_uuid__' in params:
            # Only workflow invocation code gets to set this, ignore user supplied
            # values or rerun parameters.
            del params['__workflow_invocation_uuid__']
        if workflow_resource_parameters:
            params['__workflow_resource_params__'] = workflow_resource_parameters
        elif '__workflow_resource_params__' in params:
            # Only workflow invocation code gets to set this, ignore user supplied
            # values or rerun parameters.
            del params['__workflow_resource_params__']
        job, result = tool.handle_single_execution(trans, rerun_remap_job_id, execution_slice, history, execution_cache, completed_job)
        if job:
            message = EXECUTION_SUCCESS_MESSAGE % (tool.id, job.id, job_timer)
            log.debug(message)
            execution_tracker.record_success(execution_slice, job, result)
        else:
            execution_tracker.record_error(result)

    tool_action = tool.tool_action
    if hasattr(tool_action, "check_inputs_ready"):
        for params in execution_tracker.param_combinations:
            # This will throw an exception if the tool is not ready.
            tool_action.check_inputs_ready(
                tool,
                trans,
                params,
                history
            )

    execution_tracker.ensure_implicit_collections_populated(history, mapping_params.param_template)
    config = app.config
    burst_at = getattr(config, 'tool_submission_burst_at', 10)
    burst_threads = getattr(config, 'tool_submission_burst_threads', 1)

    job_count = len(execution_tracker.param_combinations)

    jobs_executed = 0
    has_remaining_jobs = False

    if (job_count < burst_at or burst_threads < 2):
        for i, execution_slice in enumerate(execution_tracker.new_execution_slices()):
            if max_num_jobs and jobs_executed >= max_num_jobs:
                has_remaining_jobs = True
                break
            else:
                execute_single_job(execution_slice, completed_jobs[i])
    else:
        # TODO: re-record success...
        q = Queue()

        def worker():
            while True:
                execution_slice, completed_job = q.get()
                execute_single_job(execution_slice, completed_job)
                q.task_done()

        for i in range(burst_threads):
            t = Thread(target=worker)
            t.daemon = True
            t.start()

        for i, execution_slice in enumerate(execution_tracker.new_execution_slices()):
            if max_num_jobs and jobs_executed >= max_num_jobs:
                has_remaining_jobs = True
                break
            else:
                q.put((execution_slice, completed_jobs[i]))
                jobs_executed += 1

        q.join()

    if has_remaining_jobs:
        raise PartialJobExecution(execution_tracker)
    else:
        execution_tracker.finalize_dataset_collections(trans)

    log.debug("Executed %d job(s) for tool %s request: %s" % (job_count, tool.id, all_jobs_timer))
    return execution_tracker
Code Example #25
File: utils.py Project: wiggzz/downstream-farmer
class ThreadPool(object):

    def __init__(self, thread_manager, thread_count=10):
        """Initialization method

        :param thread_manager: the thread manager to use
        :param thread_count: the number of workers to instantiate
        """
        self.logger = logging.getLogger(
            'storj.downstream_farmer.utils.ThreadPool')
        self.tasks = Queue()
        self.thread_manager = thread_manager
        self.workers = list()
        self.workers_lock = threading.Lock()
        for i in range(0, thread_count):
            self._add_thread()
        # managed monitor thread
        self.monitor_thread = self.thread_manager.create_thread(
            name='MonitorThread',
            target=self._monitor)
        self.load_minimum = 0.01
        self.load_maximum = 0.25

    def thread_count(self):
        with self.workers_lock:
            return len(self.workers)

    def _add_thread(self):
        # unmanaged worker threads
        self.logger.debug(
            '{0} : adding worker'.format(threading.current_thread()))
        worker = WorkerThread(self)
        with self.workers_lock:
            self.workers.append(worker)
        return worker

    def _remove_thread(self):
        with self.workers_lock:
            if (len(self.workers) > 1):
                self.logger.debug(
                    '{0} : removing worker'.format(threading.current_thread()))
                # make sure to retain one worker
                thread = self.workers.pop(0)  # workers is a list, so pop(0) plays the role of popleft()
                thread.stop()

    def calculate_loading(self):
        total_time = 0
        work_time = 0
        with self.workers_lock:
            for w in self.workers:
                total_time += w.load_tracker.total_time()
                work_time += w.load_tracker.work_time()
        if (total_time > 0):
            load = float(work_time) / float(total_time)
        else:
            load = 0
        return load

    def max_load(self):
        max = 0
        with self.workers_lock:
            for w in self.workers:
                load = w.load_tracker.load()
                if (load > max):
                    max = load
        return max

    def check_loading(self):
        self.monitor_thread.wake()

    def _monitor(self):
        """This runs until the thread manager wakes it up during
        shutdown, at which time it will wait for any unfinished work in the
        queue, and then finish, allowing the program to exit
        """
        # wait until shutdown is called
        while (self.thread_manager.running):
            # check loading every second to see if we should add another
            # thread.
            load = self.calculate_loading()
            if (load > self.load_maximum):
                worker = self._add_thread()
                worker.start()
            elif (load < self.load_minimum):
                self._remove_thread()
            self.thread_manager.sleep(10)
        # wait for any existing work to finish
        self.logger.debug('MonitorThread waiting for tasks to finish')
        self.tasks.join()
        self.logger.debug('MonitorThread finishing')
        # now, managed thread can exit so program can close cleanly

    def put_work(self, target, args=[], kwargs={}):
        """Puts work in the work queue.
        :param work: callable work object
        """
        self.tasks.put(WorkItem(target, args, kwargs))

    def start(self):
        """Starts the thread pool and all its workers and the monitor thread
        """
        with self.workers_lock:
            for worker in self.workers:
                worker.start()
        self.monitor_thread.start()
Code Example #26
def test_cancel_fn(executor, caplog):
    task_id_queue = Queue()
    tasks = {}
    poll_fn = partial(poll_tasks, tasks)

    def cancel_fn(task):
        if task.startswith("cancel-true-"):
            return True
        if task.startswith("cancel-false-"):
            return False
        raise RuntimeError("simulated cancel error")

    poll_executor = PollExecutor(executor,
                                 poll_fn,
                                 cancel_fn,
                                 default_interval=0.01)

    def make_task(x):
        got = task_id_queue.get(True)
        task_id_queue.task_done()
        return "%s-%s" % (x, got)

    inputs = ["cancel-true", "cancel-false", "cancel-error"]
    futures = [poll_executor.submit(make_task, x) for x in inputs]

    # The futures should not currently be able to progress.
    assert_that(not any([f.done() for f in futures]))

    # Allow tasks to be created.
    task_id_queue.put("x")
    task_id_queue.put("y")
    task_id_queue.put("z")

    # Wait until all tasks were created and futures moved
    # into poll mode
    task_id_queue.join()

    # Wait until the make_task function definitely completed in each thread,
    # which can be determined by running==False
    assert_soon(lambda: assert_that(all([not f.running() for f in futures])))

    # Should be able to cancel soon.
    # Why "soon" instead of "now" - because even though the futures above are
    # not running, the delegate may not have been cleared yet.  Cancel needs
    # to wait until the future's delegate is cleared and the future has
    # transitioned fully into "poll mode".
    assert_soon(lambda: assert_that(futures[0].cancel()))

    # The other two futures don't need assert_soon since the cancel result is negative.

    # Cancel behavior should be consistent (calling multiple times same
    # as calling once)
    for _ in 1, 2:
        # Cancelling the cancel-true task should be allowed.
        assert_that(futures[0].cancel())

        # Cancelling the cancel-false task should not be allowed.
        assert_that(not futures[1].cancel())

        # Cancelling the cancel-error task should not be allowed.
        assert_that(not futures[2].cancel())

    # An error should have been logged due to the cancel function raising.
    if caplog:
        assert_that(
            caplog.record_tuples,
            has_item(
                contains(
                    "PollExecutor",
                    logging.ERROR,
                    matches_regexp(r"Exception during cancel .*/cancel-error"),
                )),
        )
Code Example #27
File: util.py Project: e42s/scales
class GraphiteReporter(threading.Thread):
    """A graphite reporter thread."""
    def __init__(self, host, port, maxQueueSize=10000):
        """Connect to a Graphite server on host:port."""
        threading.Thread.__init__(self)

        self.host, self.port = host, port
        self.sock = None
        self.queue = Queue()
        self.maxQueueSize = maxQueueSize
        self.daemon = True

    def run(self):
        """Run the thread."""
        while True:
            try:
                try:
                    name, value, valueType, stamp = self.queue.get()
                except TypeError:
                    break
                self.log(name, value, valueType, stamp)
            finally:
                self.queue.task_done()

    def connect(self):
        """Connects to the Graphite server if not already connected."""
        if self.sock is not None:
            return
        backoff = 0.01
        while True:
            try:
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.settimeout(5)
                sock.connect((self.host, self.port))
                self.sock = sock
                return
            except socket.error:
                time.sleep(random.uniform(0, 2.0 * backoff))
                backoff = min(backoff * 2.0, 5.0)

    def disconnect(self):
        """Disconnect from the Graphite server if connected."""
        if self.sock is not None:
            try:
                self.sock.close()
            except socket.error:
                pass
            finally:
                self.sock = None

    def _sendMsg(self, msg):
        """Send a line to graphite. Retry with exponential backoff."""
        if not self.sock:
            self.connect()
        backoff = 0.001
        while True:
            try:
                self.sock.sendall(msg)
                break
            except socket.error:
                logging.warning('Graphite connection error', exc_info=True)
                self.disconnect()
                time.sleep(random.uniform(0, 2.0 * backoff))
                backoff = min(backoff * 2.0, 5.0)
                self.connect()

    def _sanitizeName(self, name):
        """Sanitize a metric name."""
        return name.replace(' ', '-')

    def log(self, name, value, valueType=None, stamp=None):
        """Log a named numeric value. The value type may be 'value',
    'count', or None."""
        if type(value) == float:
            form = "%s%s %2.2f %d\n"
        else:
            form = "%s%s %s %d\n"

        if valueType is not None and len(
                valueType) > 0 and valueType[0] != '.':
            valueType = '.' + valueType

        if not stamp:
            stamp = time.time()

        self._sendMsg(
            form % (self._sanitizeName(name), valueType or '', value, stamp))

    def enqueue(self, name, value, valueType=None, stamp=None):
        """Enqueue a call to log."""
        # If queue is too large, refuse to log.
        if self.maxQueueSize and self.queue.qsize() > self.maxQueueSize:
            return
        # Stick arguments into the queue
        self.queue.put((name, value, valueType, stamp))

    def flush(self):
        """Block until all stats have been sent to Graphite."""
        self.queue.join()

    def shutdown(self):
        """Shut down the background thread."""
        self.queue.put(None)
        self.flush()
Code Example #28
File: auto_dataset.py Project: aetros/aetros-cli
def read_images_in_memory(job_model, dataset, node, trainer):
    """
    Reads all images into memory and applies augmentation if enabled
    """
    concurrent = psutil.cpu_count()

    dataset_config = dataset['config']
    controller = {'running': True}
    q = Queue(concurrent)

    result = {
        'X_train': [],
        'Y_train': [],
        'X_test': [],
        'Y_test': []
    }

    images = []
    max = 0

    path = job_model.get_dataset_downloads_dir(dataset)
    if 'path' in dataset['config']:
        path = dataset['config']['path']

    classes_count = 0
    category_map = {}
    classes = []

    trainer.set_status('LOAD IMAGES INTO MEMORY')

    try:
        for i in range(concurrent):
            t = ImageReadWorker(q, job_model, node, path, images, controller)
            t.daemon = True
            t.start()

        for validation_or_training in ['validation', 'training']:
            if os.path.isdir(os.path.normpath(path + '/' + validation_or_training)):
                for category_name in os.listdir(os.path.normpath(path + '/' + validation_or_training)):
                    if os.path.isdir(os.path.normpath(path + '/' + validation_or_training + '/' + category_name)):

                        if category_name not in category_map:
                            category_map[category_name] = classes_count
                            if 'classes' in dataset_config and 'category_' in category_name:
                                category_idx = int(category_name.replace('category_', ''))
                                category_map[category_name] = category_idx
                                target_category = dataset_config['classes'][category_idx]
                                classes.append(target_category['title'] or 'Class %s' % (category_idx, ))
                            else:
                                classes.append(category_name)

                            classes_count += 1

                        for id in os.listdir(os.path.normpath(path + '/' + validation_or_training + '/' + category_name)):
                            file_path = os.path.join(path, validation_or_training, category_name, id)
                            q.put([file_path, validation_or_training == 'validation', category_name])
                            max += 1

        q.join()
        controller['running'] = False

        train_images = []
        test_images = []

        for v in images:
            image, validation, category_dir = v
            if validation is True:
                test_images.append([image, category_map[category_dir]])
            else:
                train_images.append([image, category_map[category_dir]])

        train_datagen = None
        augmentation = bool(get_option(dataset_config, 'augmentation', False))
        if augmentation:
            train_datagen = get_image_data_augmentor_from_dataset(dataset)

        train = InMemoryDataGenerator(train_datagen, train_images, classes_count, job_model.job['config']['batchSize'])

        test = InMemoryDataGenerator(None, test_images, classes_count, job_model.job['config']['batchSize'])

        nb_sample = len(train_images)
        trainer.set_info('Dataset size', {'training': nb_sample, 'validation': len(test_images)})
        trainer.set_generator_training_nb(nb_sample)
        trainer.set_generator_validation_nb(len(test_images))

        trainer.logger.info(("Found %d classes, %d images (%d in training [%saugmented], %d in validation). Read all images into memory from %s" %
               (classes_count, max, len(train_images), 'not ' if augmentation is False else '', len(test_images), path)))

        if classes_count == 0:
            trainer.logger.warning("Could not find any classes. Does the directory contains images?")
            sys.exit(1)

        trainer.output_size = classes_count
        trainer.set_info('classes', classes)
        trainer.classes = classes

        result['X_train'] = train
        result['Y_train'] = train
        result['X_test'] = test
        result['Y_test'] = test

        return result

    except KeyboardInterrupt:
        controller['running'] = False
        sys.exit(1)
Code Example #29
File: auto_dataset.py Project: bityangke/aetros-cli
def get_images(job_model, dataset, node, trainer):
    concurrent = 15

    from PIL import ImageFile
    if hasattr(ImageFile, 'LOAD_TRUNCATED_IMAGES'):
        ImageFile.LOAD_TRUNCATED_IMAGES = True

    q = Queue(concurrent)
    config = dataset['config']

    dir = trainer.job_model.get_dataset_downloads_dir(dataset)

    ensure_dir(dir)

    if 'classes' not in config or not config['classes']:
        print("Dataset %s does not contain any classes." % (dataset['id'], ))
        return {
            'X_train': np.array([]),
            'Y_train': np.array([]),
            'X_test': np.array([]),
            'Y_test': np.array([])
        }

    classes = config['classes']

    trainer.set_status('PREPARE_IMAGES')

    max = 0
    images = {}

    dataset_path = trainer.job_model.get_dataset_downloads_dir(dataset)
    meta_information_file = dataset_path + '/meta.json'

    classes_changed = False
    config_changed = False
    had_previous = False
    classes_md5 = hashlib.md5(
        json.dumps(classes,
                   default=invalid_json_values).encode('utf-8')).hexdigest()

    validationFactor = 0.2

    if os.path.isdir(dataset_path):
        if os.path.isfile(meta_information_file):
            with open(meta_information_file) as f:
                meta = json.load(f)
                if meta:
                    had_previous = True
                    if 'classes_md5' in meta and meta[
                            'classes_md5'] != classes_md5:
                        classes_changed = True

                    trigger_changed = [
                        'resize', 'resizeWidth', 'resizeHeight',
                        'resizeCompression'
                    ]
                    for i in trigger_changed:
                        if i in meta['config'] and i in config and meta[
                                'config'][i] != config[i]:
                            config_changed = True
                else:
                    config_changed = True
        else:
            config_changed = True

    need_download = classes_changed or config_changed

    if need_download:
        if had_previous:
            print("Reset dataset and re-download images to " + dir)
            if classes_changed:
                print(" .. because classes changed")
            if config_changed:
                print(" .. because settings changed")
        else:
            print("Download images to " + dir)

        resize = bool(get_option(config, 'resize', True))
        if resize:
            resizeSize = (int(get_option(config, 'resizeWidth', 64)),
                          int(get_option(config, 'resizeHeight', 64)))
            print(" .. with resizing to %dx%d " % resizeSize)

        # we need to download all images
        shutil.rmtree(dataset_path)

        controller = {'running': True}
        try:
            for category in classes:
                max += len(category['images'])

            for i in range(concurrent):
                t = ImageDownloaderWorker(q, trainer, dataset, max, images,
                                          controller)
                t.daemon = True
                t.start()

            for category_idx, category in enumerate(classes):
                for image in category['images']:
                    q.put([image, category_idx])

            q.join()
            controller['running'] = False

            def move_image(image, category='training'):
                if image['id'] in images and os.path.isfile(
                        images[image['id']]):
                    target_path = dataset_path + \
                        '/%s/category_%s/%s' % (category, category_idx,
                                                os.path.basename(images[image['id']]))
                    ensure_dir(os.path.dirname(target_path))
                    os.rename(images[image['id']], target_path)

            for category_idx, category in enumerate(classes):
                random.shuffle(category['images'])
                position = int(
                    math.ceil(len(category['images']) * validationFactor))

                ensure_dir(dataset_path + '/training')
                ensure_dir(dataset_path + '/validation')

                for image in category['images'][position:]:  # test data
                    if image['id'] in images and os.path.isfile(
                            images[image['id']]):
                        move_image(image, 'training')

                for image in category['images'][:position]:  # validation data
                    if image['id'] in images and os.path.isfile(
                            images[image['id']]):
                        move_image(image, 'validation')

            with open(meta_information_file, 'w') as f:
                meta = {
                    'loaded_at': classes_md5,
                    'classes_md5': classes_md5,
                    'config': config
                }
                json.dump(meta, f, default=invalid_json_values)

        except KeyboardInterrupt:
            controller['running'] = False
            sys.exit(1)
    else:
        print("Downloaded images up2date in " + dir)
        print(
            " - Remove this directory if you want to re-download all images of your dataset and re-shuffle training/validation images."
        )

    trainer.output_size = len(classes)
    trainer.set_status('LOAD IMAGE DONE')

    # change to type local_images
    dataset_transformed = dataset.copy()
    dataset_transformed['config']['path'] = dir

    all_memory = get_option(dataset['config'], 'allMemory', False, 'bool')

    if all_memory:
        return read_images_in_memory(job_model, dataset_transformed, node,
                                     trainer)
    else:
        return read_images_keras_generator(job_model, dataset_transformed,
                                           node, trainer)
Code Example #30
File: auto_dataset.py Project: bityangke/aetros-cli
def read_images_in_memory(job_model, dataset, node, trainer):
    """
    Reads all images into memory and applies augmentation if enabled
    """
    concurrent = psutil.cpu_count()

    dataset_config = dataset['config']
    controller = {'running': True}
    config = dataset['config']  # TODO: config not used
    q = Queue(concurrent)

    result = {'X_train': [], 'Y_train': [], 'X_test': [], 'Y_test': []}

    images = []
    max = 0

    path = trainer.job_model.get_dataset_downloads_dir(dataset)
    if 'path' in dataset['config']:
        path = dataset['config']['path']

    classes_count = 0
    category_map = {}
    classes = []

    try:
        for i in range(concurrent):
            t = ImageReadWorker(q, job_model, node, path, images, controller)
            t.daemon = True
            t.start()

        for validation_or_training in ['validation', 'training']:
            if os.path.isdir(path + '/' + validation_or_training):
                for category_name in os.listdir(path + '/' +
                                                validation_or_training):
                    if os.path.isdir(path + '/' + validation_or_training +
                                     '/' + category_name):

                        if category_name not in category_map:
                            category_map[category_name] = classes_count
                            if 'classes' in dataset_config and 'category_' in category_name:
                                category_idx = int(
                                    category_name.replace('category_', ''))
                                category_map[category_name] = category_idx
                                target_category = dataset_config['classes'][
                                    category_idx]
                                classes.append(target_category['title']
                                               or 'Class %s' %
                                               (category_idx, ))
                            else:
                                classes.append(category_name)

                            classes_count += 1

                        for id in os.listdir(path + '/' +
                                             validation_or_training + '/' +
                                             category_name):
                            file_path = os.path.join(path,
                                                     validation_or_training,
                                                     category_name, id)
                            q.put([
                                file_path,
                                validation_or_training == 'validation',
                                category_name
                            ])
                            max += 1

        q.join()
        controller['running'] = False

        train_images = []
        test_images = []

        for v in images:
            image, validation, category_dir = v
            if validation is True:
                test_images.append([image, category_map[category_dir]])
            else:
                train_images.append([image, category_map[category_dir]])

        train_datagen = None
        augmentation = bool(get_option(dataset_config, 'augmentation', False))
        if augmentation:
            train_datagen = get_image_data_augmentor_from_dataset(dataset)

        train = InMemoryDataGenerator(
            train_datagen, train_images, classes_count,
            job_model.job['config']['settings']['batchSize'])

        test = InMemoryDataGenerator(
            None, test_images, classes_count,
            job_model.job['config']['settings']['batchSize'])

        nb_sample = len(train_images)
        trainer.set_generator_training_nb(nb_sample)
        trainer.set_generator_validation_nb(len(test_images))

        print("Found %d classes, %d images (%d in training [%saugmented], %d in validation). "
              "Read all images into memory from %s" %
              (classes_count, max, len(train_images),
               'not ' if augmentation is False else '', len(test_images), path))

        if classes_count == 0:
            print("Could not find any classes. "
                  "Does the directory contain images?")
            sys.exit(1)

        trainer.output_size = classes_count
        trainer.set_job_system_info('classes', classes)
        trainer.classes = classes

        result['X_train'] = train
        result['Y_train'] = train
        result['X_test'] = test
        result['Y_test'] = test

        return result

    except KeyboardInterrupt:
        controller['running'] = False
        sys.exit(1)
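
The function above fans file paths out to ImageReadWorker threads through a bounded Queue and blocks on q.join() until every image has been read. A minimal, generic sketch of that producer/consumer pattern (run_workers and handle are illustrative names, not from the source):

import threading
from queue import Queue

def run_workers(items, handle, concurrency=4):
    # Bounded queue so the producer cannot race far ahead of the workers.
    q = Queue(concurrency)

    def worker():
        while True:
            item = q.get()
            try:
                handle(item)
            finally:
                q.task_done()

    # Daemon workers: they die with the main thread once q.join() returns.
    for _ in range(concurrency):
        t = threading.Thread(target=worker)
        t.daemon = True
        t.start()

    for item in items:
        q.put(item)
    q.join()  # block until every queued item has been processed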
Code example #31
class TaskIO(object):
    """Object used to stream I/O between a
    running Mesos task and the local terminal.

    :param task: task ID
    :type task: str
    :param cmd: a command to launch inside the task's container
    :type cmd: str
    :param args: Additional arguments for the command
    :type args: str
    :param interactive: whether to attach STDIN of the current
                        terminal to the new command being launched
    :type interactive: bool
    :param tty: whether to allocate a tty for this command and attach
                the local terminal to it
    :type tty: bool
    """

    # The interval to send heartbeat messages to
    # keep persistent connections alive.
    HEARTBEAT_INTERVAL = 30
    HEARTBEAT_INTERVAL_NANOSECONDS = HEARTBEAT_INTERVAL * 1000000000

    def __init__(self,
                 task_id,
                 cmd=None,
                 args=None,
                 interactive=False,
                 tty=False):
        # Store relevant parameters of the call for later.
        self.cmd = cmd
        self.interactive = interactive
        self.tty = tty
        self.args = args

        # Create a client and grab a reference to the DC/OS master.
        client = DCOSClient()
        master = get_master(client)

        # Get the task and make sure its container was launched by the UCR.
        # Since tasks' containers are launched by the UCR by default, we want
        # to allow most tasks to pass through unchecked. The only exception is
        # when a task has an explicit container specified and it is not of type
        # "MESOS". Having a type of "MESOS" implies that it was launched by the
        # UCR -- all other types imply it was not.
        task_obj = master.task(task_id)
        if "container" in task_obj.dict():
            if "type" in task_obj.dict()["container"]:
                if task_obj.dict()["container"]["type"] != "MESOS":
                    raise DCOSException(
                        "This command is only supported for tasks"
                        " launched by the Universal Container Runtime (UCR).")

        # Get the URL to the agent running the task.
        if client._mesos_master_url:
            self.agent_url = client.slave_url(
                slave_id="",
                private_url=task_obj.slave().http_url(),
                path="api/v1")
        else:
            self.agent_url = client.slave_url(slave_id=task_obj.slave()['id'],
                                              private_url="",
                                              path="api/v1")

        # Grab a reference to the container ID for the task.
        self.parent_id = master.get_container_id(task_obj)

        # Generate a new UUID for the nested container
        # used to run commands passed to `task exec`.
        self.container_id = str(uuid.uuid4())

        # Set up a recordio encoder and decoder
        # for any incoming and outgoing messages.
        self.encoder = recordio.Encoder(
            lambda s: bytes(json.dumps(s, ensure_ascii=False), "UTF-8"))
        self.decoder = recordio.Decoder(
            lambda s: json.loads(s.decode("UTF-8")))

        # Set up queues to send messages between threads used for
        # reading/writing to STDIN/STDOUT/STDERR and threads
        # sending/receiving data over the network.
        self.input_queue = Queue()
        self.output_queue = Queue()

        # Set up an event to block attaching
        # input until attaching output is complete.
        self.attach_input_event = threading.Event()
        self.attach_input_event.clear()

        # Set up an event to block printing the output
        # until an attach input event has successfully
        # been established.
        self.print_output_event = threading.Event()
        self.print_output_event.clear()

        # Set up an event to block the main thread
        # from exiting until signaled to do so.
        self.exit_event = threading.Event()
        self.exit_event.clear()

        # Use a class variable to store exceptions thrown on
        # other threads and raise them on the main thread before
        # exiting.
        self.exception = None

    def run(self):
        """Run the helper threads in this class which enable streaming
        of STDIN/STDOUT/STDERR between the CLI and the Mesos Agent API.

        If a tty is requested, we take over the current terminal and
        put it into raw mode. We make sure to reset the terminal back
        to its original settings before exiting.
        """

        # Without a TTY.
        if not self.tty:
            try:
                self._start_threads()
                self.exit_event.wait()
            except Exception as e:
                self.exception = e

            if self.exception:
                raise self.exception
            return

        # With a TTY.
        if util.is_windows_platform():
            raise DCOSException(
                "Running with the '--tty' flag is not supported on windows.")

        if not sys.stdin.isatty():
            raise DCOSException(
                "Must be running in a tty to pass the '--tty flag'.")

        fd = sys.stdin.fileno()
        oldtermios = termios.tcgetattr(fd)

        try:
            if self.interactive:
                tty.setraw(fd, when=termios.TCSANOW)
                self._window_resize(signal.SIGWINCH, None)
                signal.signal(signal.SIGWINCH, self._window_resize)

            self._start_threads()
            self.exit_event.wait()
        except Exception as e:
            self.exception = e

        termios.tcsetattr(sys.stdin.fileno(), termios.TCSAFLUSH, oldtermios)

        if self.exception:
            raise self.exception

    def _thread_wrapper(self, func):
        """A wrapper around all threads used in this class

        If a thread throws an exception, it will unblock the main
        thread and save the exception in a class variable. The main
        thread will then rethrow the exception before exiting.

        :param func: The start function for the thread
        :type func: function
        """
        try:
            func()
        except Exception as e:
            self.exception = e
            self.exit_event.set()

    def _start_threads(self):
        """Start all threads associated with this class
        """
        if self.interactive:
            # Collects input from STDIN and puts
            # it in the input_queue as data messages.
            thread = threading.Thread(target=self._thread_wrapper,
                                      args=(self._input_thread, ))
            thread.daemon = True
            thread.start()

            # Prepares heartbeat control messages and
            # puts them in the input queue at a specific
            # heartbeat interval.
            thread = threading.Thread(target=self._thread_wrapper,
                                      args=(self._heartbeat_thread, ))
            thread.daemon = True
            thread.start()

            # Opens a persistent connection with the mesos agent and
            # feeds it both control and data messages from the input
            # queue via ATTACH_CONTAINER_INPUT messages.
            thread = threading.Thread(target=self._thread_wrapper,
                                      args=(self._attach_container_input, ))
            thread.daemon = True
            thread.start()

        # Opens a persistent connection with a mesos agent, reads
        # data messages from it and feeds them to an output_queue.
        thread = threading.Thread(
            target=self._thread_wrapper,
            args=(self._launch_nested_container_session, ))
        thread.daemon = True
        thread.start()

        # Collects data messages from the output queue and writes
        # their content to STDOUT and STDERR.
        thread = threading.Thread(target=self._thread_wrapper,
                                  args=(self._output_thread, ))
        thread.daemon = True
        thread.start()

    def _launch_nested_container_session(self):
        """Sends a request to the Mesos Agent to launch a new
        nested container and attach to its output stream.
        The output stream is then sent back in the response.
        """

        message = {
            'type': "LAUNCH_NESTED_CONTAINER_SESSION",
            'launch_nested_container_session': {
                'container_id': {
                    'parent': self.parent_id,
                    'value': self.container_id
                },
                'command': {
                    'value': self.cmd,
                    'arguments': [self.cmd] + self.args,
                    'shell': False
                }
            }
        }

        if self.tty:
            message['launch_nested_container_session']['container'] = {
                'type': 'MESOS',
                'tty_info': {}
            }

        req_extra_args = {
            'stream': True,
            'headers': {
                'Content-Type': 'application/json',
                'Accept': 'application/recordio',
                'Message-Accept': 'application/json'
            }
        }

        response = http.post(self.agent_url,
                             data=json.dumps(message),
                             timeout=None,
                             **req_extra_args)

        self._process_output_stream(response)

    def _process_output_stream(self, response):
        """Gets data streamed over the given response and places the
        returned messages into our output_queue. Only expects to
        receive data messages.

        :param response: Response from an http post
        :type response: requests.models.Response
        """

        # Now that we are ready to process the output stream (meaning
        # our output connection has been established), allow the input
        # stream to be attached by setting an event.
        self.attach_input_event.set()

        # If we are running in interactive mode, wait to make sure that
        # our input connection succeeds before pushing any output to the
        # output queue.
        if self.interactive:
            self.print_output_event.wait()

        try:
            for chunk in response.iter_content(chunk_size=None):
                records = self.decoder.decode(chunk)

                for r in records:
                    if r.get('type') and r['type'] == 'DATA':
                        self.output_queue.put(r['data'])
        except Exception as e:
            raise DCOSException(
                "Error parsing output stream: {error}".format(error=e))

        self.output_queue.join()
        self.exit_event.set()

    def _attach_container_input(self):
        """Streams all input data (e.g. STDIN) from the client to the agent
        """
        def _initial_input_streamer():
            """Generator function yielding the initial ATTACH_CONTAINER_INPUT
            message for streaming. We have a separate generator for this so
            that we can attempt the connection once before committing to a
            persistent connection where we stream the rest of the input.

            :returns: A RecordIO encoded message
            """

            message = {
                'type': 'ATTACH_CONTAINER_INPUT',
                'attach_container_input': {
                    'type': 'CONTAINER_ID',
                    'container_id': {
                        'parent': self.parent_id,
                        'value': self.container_id
                    }
                }
            }

            yield self.encoder.encode(message)

        def _input_streamer():
            """Generator function yielding ATTACH_CONTAINER_INPUT
            messages for streaming. It yields the _initial_input_streamer()
            message, followed by messages from the input_queue on each
            subsequent call.

            :returns: A RecordIO encoded message
            """

            yield next(_initial_input_streamer())

            while True:
                record = self.input_queue.get()
                if not record:
                    break
                yield record

        req_extra_args = {
            'headers': {
                'Content-Type': 'application/recordio',
                'Message-Content-Type': 'application/json',
                'Accept': 'application/json',
                'Connection': 'close',
                'Transfer-Encoding': 'chunked'
            }
        }

        # Ensure we don't try to attach our input to a container that isn't
        # fully up and running by waiting until the
        # `_process_output_stream` function signals us that it's ready.
        self.attach_input_event.wait()

        # Send an initial "Test" message to ensure that we are able to
        # establish a connection with the agent. If we aren't we will throw
        # an exception and break out of this thread. However, in cases where
        # we receive a 500 response from the agent, we actually want to
        # continue without throwing an exception. A 500 error indicates that
        # we can't connect to the container because it has already finished
        # running. In that case we continue running to allow the output queue
        # to be flushed.
        try:
            http.post(self.agent_url,
                      data=_initial_input_streamer(),
                      **req_extra_args)
        except DCOSHTTPException as e:
            if e.response.status_code != 500:
                raise e

        # If we succeeded with that connection, unblock process_output_stream()
        # from sending output data to the output thread.
        self.print_output_event.set()

        # Begin streaming the input.
        http.post(self.agent_url,
                  data=_input_streamer(),
                  timeout=None,
                  **req_extra_args)

    def _input_thread(self):
        """Reads from STDIN and places a message
        with that data onto the input_queue.
        """

        message = {
            'type': 'ATTACH_CONTAINER_INPUT',
            'attach_container_input': {
                'type': 'PROCESS_IO',
                'process_io': {
                    'type': 'DATA',
                    'data': {
                        'type': 'STDIN',
                        'data': ''
                    }
                }
            }
        }

        for chunk in iter(partial(os.read, sys.stdin.fileno(), 1024), b''):
            message['attach_container_input']['process_io']['data'][
                'data'] = base64.b64encode(chunk).decode('utf-8')

            self.input_queue.put(self.encoder.encode(message))

        # Push an empty string to indicate EOF to the server and push
        # 'None' to signal that we are done processing input.
        message['attach_container_input']['process_io']['data']['data'] = ''
        self.input_queue.put(self.encoder.encode(message))
        self.input_queue.put(None)

    def _output_thread(self):
        """Reads from the output_queue and writes the data
        to the appropriate STDOUT or STDERR.
        """

        while True:
            # Get a message from the output queue and decode it.
            # Then write the data to the appropriate stdout or stderr.
            output = self.output_queue.get()
            if not output.get('data'):
                raise DCOSException("Error no 'data' field in output message")

            data = output['data']
            data = base64.b64decode(data.encode('utf-8'))

            if output.get('type') and output['type'] == 'STDOUT':
                sys.stdout.buffer.write(data)
                sys.stdout.flush()
            elif output.get('type') and output['type'] == 'STDERR':
                sys.stderr.buffer.write(data)
                sys.stderr.flush()
            else:
                raise DCOSException("Unsupported data type in output stream")

            self.output_queue.task_done()

    def _heartbeat_thread(self):
        """Generates a heartbeat message to send over the
        ATTACH_CONTAINER_INPUT stream every `interval` seconds and
        inserts it in the input queue.
        """

        interval = self.HEARTBEAT_INTERVAL
        nanoseconds = self.HEARTBEAT_INTERVAL_NANOSECONDS

        message = {
            'type': 'ATTACH_CONTAINER_INPUT',
            'attach_container_input': {
                'type': 'PROCESS_IO',
                'process_io': {
                    'type': 'CONTROL',
                    'control': {
                        'type': 'HEARTBEAT',
                        'heartbeat': {
                            'interval': {
                                'nanoseconds': nanoseconds
                            }
                        }
                    }
                }
            }
        }

        while True:
            self.input_queue.put(self.encoder.encode(message))
            time.sleep(interval)

    def _window_resize(self, signum, frame):
        """Signal handler for SIGWINCH.

        Generates a message with the current dimensions of the
        terminal and puts it in the input_queue.

        :param signum: the signal number being handled
        :type signum: int
        :param frame: current stack frame
        :type frame: frame
        """

        # Determine the size of our terminal, and create the message to be sent
        rows, columns = os.popen('stty size', 'r').read().split()

        message = {
            'type': 'ATTACH_CONTAINER_INPUT',
            'attach_container_input': {
                'type': 'PROCESS_IO',
                'process_io': {
                    'type': 'CONTROL',
                    'control': {
                        'type': 'TTY_INFO',
                        'tty_info': {
                            'window_size': {
                                'rows': int(rows),
                                'columns': int(columns)
                            }
                        }
                    }
                }
            }
        }

        self.input_queue.put(self.encoder.encode(message))
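
TaskIO routes every helper thread through _thread_wrapper so that an exception raised on any thread is stored and re-raised on the main thread once exit_event fires. A stripped-down sketch of that pattern, assuming (as _process_output_stream does above) that one of the worker functions sets the exit event when its work is done:

import threading

class ThreadedRunner(object):
    def __init__(self):
        self.exit_event = threading.Event()
        self.exception = None

    def _thread_wrapper(self, func):
        # Record the failure and unblock the main thread.
        try:
            func()
        except Exception as e:
            self.exception = e
            self.exit_event.set()

    def run(self, funcs):
        # At least one of `funcs` is expected to call self.exit_event.set()
        # when it finishes normally, mirroring _process_output_stream above.
        for func in funcs:
            t = threading.Thread(target=self._thread_wrapper, args=(func,))
            t.daemon = True
            t.start()
        self.exit_event.wait()
        if self.exception:
            raise self.exception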
Code example #32
File: execute.py Project: katbeaulieu/galaxy-1
def execute(trans,
            tool,
            param_combinations,
            history,
            rerun_remap_job_id=None,
            collection_info=None,
            workflow_invocation_uuid=None):
    """
    Execute a tool and return object containing summary (output data, number of
    failures, etc...).
    """
    all_jobs_timer = ExecutionTimer()
    execution_tracker = ToolExecutionTracker(tool, param_combinations,
                                             collection_info)
    app = trans.app
    execution_cache = ToolExecutionCache(trans)

    def execute_single_job(params):
        job_timer = ExecutionTimer()
        if workflow_invocation_uuid:
            params['__workflow_invocation_uuid__'] = workflow_invocation_uuid
        elif '__workflow_invocation_uuid__' in params:
            # Only workflow invocation code gets to set this, ignore user supplied
            # values or rerun parameters.
            del params['__workflow_invocation_uuid__']
        job, result = tool.handle_single_execution(trans, rerun_remap_job_id,
                                                   params, history,
                                                   collection_info,
                                                   execution_cache)
        if job:
            message = EXECUTION_SUCCESS_MESSAGE % (tool.id, job.id, job_timer)
            log.debug(message)
            execution_tracker.record_success(job, result)
        else:
            execution_tracker.record_error(result)

    config = app.config
    burst_at = getattr(config, 'tool_submission_burst_at', 10)
    burst_threads = getattr(config, 'tool_submission_burst_threads', 1)

    tool_action = tool.action
    if hasattr(tool_action, "check_inputs_ready"):
        for params in execution_tracker.param_combinations:
            # This will throw an exception if the tool is not ready.
            tool_action.check_inputs_ready(tool, trans, params, history)

    job_count = len(execution_tracker.param_combinations)
    if job_count < burst_at or burst_threads < 2:
        for params in execution_tracker.param_combinations:
            execute_single_job(params)
    else:
        q = Queue()

        def worker():
            while True:
                params = q.get()
                execute_single_job(params)
                q.task_done()

        for i in range(burst_threads):
            t = Thread(target=worker)
            t.daemon = True
            t.start()

        for params in execution_tracker.param_combinations:
            q.put(params)

        q.join()

    log.debug("Executed %d job(s) for tool %s request: %s" %
              (job_count, tool.id, all_jobs_timer))
    if collection_info:
        history = history or tool.get_default_history_by_trans(trans)
        if len(param_combinations) == 0:
            template = "Attempting to map over an empty collection, this is not yet implemented. colleciton_info is [%s]"
            message = template % collection_info
            log.warn(message)
            raise Exception(message)
        params = param_combinations[0]
        execution_tracker.create_output_collections(trans, history, params)

    return execution_tracker
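
The choice above between the inline loop and the threaded burst path hinges on two config attributes read with getattr(). A one-line restatement of that predicate (the should_burst name is mine; the defaults are the ones shown in the code):

def should_burst(job_count, burst_at=10, burst_threads=1):
    # Inverse of the `job_count < burst_at or burst_threads < 2` guard above:
    # use the Queue/worker path only when the request is large enough and
    # more than one burst thread is configured.
    return job_count >= burst_at and burst_threads >= 2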
Code example #33
class RisMonolith_v1_0_0(Dictable):
    """Monolithic cache of RIS data"""
    def __init__(self, client):
        """Initialize RisMonolith

        :param client: client to utilize
        :type client: RmcClient object

        """
        self._client = client
        self.name = "Monolithic output of RIS Service"
        self.types = OrderedDict()
        self._visited_urls = list()
        self._current_location = '/' # "root"
        self.queue = Queue()
        self._type = None
        self._name = None
        self.progress = 0
        self.reload = False

        self._typestring = '@odata.type'
        self._hrefstring = '@odata.id'

    def _get_type(self):
        """Return monolith version type"""
        return "Monolith.1.0.0"

    type = property(_get_type, None)

    def update_progress(self):
        """Simple function to increment the dot progress"""
        if self.progress % 6 == 0:
            sys.stdout.write('.')

    def get_visited_urls(self):
        """Return the visited URLS"""
        return self._visited_urls

    def set_visited_urls(self, visited_urls):
        """Set visited URLS to given list."""
        self._visited_urls = visited_urls

    def load(self, path=None, includelogs=False, skipinit=False, \
                        skipcrawl=False, loadtype='href', loadcomplete=False):
        """Walk entire RIS model and cache all responses in self.

        :param path: path to start load from.
        :type path: str.
        :param includelogs: flag to determine if logs should be downloaded also.
        :type includelogs: boolean.
        :param skipinit: flag to determine if first run of load.
        :type skipinit: boolean.
        :param skipcrawl: flag to determine if load should traverse found links.
        :type skipcrawl: boolean.
        :param loadtype: flag to determine if load is meant for only href items.
        :type loadtype: str.
        :param loadcomplete: flag to download the entire monolith
        :type loadcomplete: boolean

        """
        if not skipinit:
            if LOGGER.getEffectiveLevel() == 40:
                sys.stdout.write("Discovering data...")
            else:
                LOGGER.info("Discovering data...")
            self.name = self.name + ' at %s' % self._client.base_url

            if not self.types:
                self.types = OrderedDict()

        if threading.active_count() < 6:
            for _ in range(5):
                workhand = SuperDuperWorker(self.queue)
                workhand.setDaemon(True)
                workhand.start()

        selectivepath = path
        if not selectivepath:
            selectivepath = self._client._rest_client.default_prefix

        self._load(selectivepath, skipcrawl=skipcrawl, includelogs=includelogs,\
             skipinit=skipinit, loadtype=loadtype, loadcomplete=loadcomplete)
        self.queue.join()

        if not skipinit:
            if LOGGER.getEffectiveLevel() == 40:
                sys.stdout.write("Done\n")
            else:
                LOGGER.info("Done\n")

    def _load(self, path, skipcrawl=False, originaluri=None, includelogs=False,\
                        skipinit=False, loadtype='href', loadcomplete=False):
        """Helper function to main load function.

        :param path: path to start load from.
        :type path: str.
        :param skipcrawl: flag to determine if load should traverse found links.
        :type skipcrawl: boolean.
        :param originaluri: variable to assist in determining originating path.
        :type originaluri: str.
        :param includelogs: flag to determine if logs should be downloaded also.
        :type includelogs: boolean.
        :param skipinit: flag to determine if first run of load.
        :type skipinit: boolean.
        :param loadtype: flag to determine if load is meant for only href items.
        :type loadtype: str.
        :param loadcomplete: flag to download the entire monolith
        :type loadcomplete: boolean

        """
        if path.endswith("?page=1"):
            return
        elif not includelogs:
            if "/Logs/" in path:
                return

        #TODO: need to find a better way to support non ascii characters
        path = path.replace("|", "%7C")

        #remove fragments
        newpath = urlparse(path)
        newpath = list(newpath[:])
        newpath[-1] = ''

        path = urlunparse(tuple(newpath))

        LOGGER.debug('_loading %s', path)

        if not self.reload:
            if path.lower() in self._visited_urls:
                return

        resp = self._client.get(path)

        if resp.status != 200:
            path = path + '/'
            resp = self._client.get(path)

            if resp.status == 401:
                raise SessionExpiredRis("Invalid session. Please logout and "\
                                        "log back in or include credentials.")
            elif resp.status != 200:
                return

        self.queue.put((resp, path, skipinit, self))

        if loadtype == 'href':
            #follow all the href attributes
            jsonpath_expr = jsonpath_rw.parse("$..'@odata.id'")
            matches = jsonpath_expr.find(resp.dict)

            if 'links' in resp.dict and 'NextPage' in resp.dict['links']:
                if originaluri:
                    next_link_uri = originaluri + '?page=' + \
                                    str(resp.dict['links']['NextPage']['page'])
                    href = '%s' % next_link_uri

                    self._load(href, originaluri=originaluri, \
                               includelogs=includelogs, skipcrawl=skipcrawl, \
                               skipinit=skipinit)
                else:
                    next_link_uri = path + '?page=' + \
                                    str(resp.dict['links']['NextPage']['page'])

                    href = '%s' % next_link_uri
                    self._load(href, originaluri=path, includelogs=includelogs,\
                                        skipcrawl=skipcrawl, skipinit=skipinit)

            if not skipcrawl:
                for match in matches:
                    if str(match.full_path) == "*****@*****.**" or \
                            str(match.full_path) == "*****@*****.**":
                        continue

                    if match.value == path:
                        continue

                    href = '%s' % match.value
                    self._load(href, skipcrawl=skipcrawl, \
                           originaluri=originaluri, includelogs=includelogs, \
                           skipinit=skipinit)

            if loadcomplete:
                for match in matches:
                    self._load(match.value, skipcrawl=skipcrawl, originaluri=\
                       originaluri, includelogs=includelogs, skipinit=skipinit)

    def branch_worker(self, resp, path, skipinit):
        """Helper for load function, creates threaded worker

        :param resp: response received.
        :type resp: str.
        :param path: path correlating to the response.
        :type path: str.
        :param skipinit: flag to determine if progress bar should be updated.
        :type skipinit: boolean.

        """
        self._visited_urls.append(path.lower())

        member = RisMonolithMember_v1_0_0(resp)
        if not member.type:
            return

        self.update_member(member)

        if not skipinit:
            self.progress += 1
            if LOGGER.getEffectiveLevel() == 40:
                self.update_progress()

    def update_member(self, member):
        """Adds member to this monolith. If the member already exists the"""
        """ data is updated in place.

        :param member: Ris monolith member object made by branch worker.
        :type member: RisMonolithMember_v1_0_0.

        """
        if member.maj_type not in self.types:
            self.types[member.maj_type] = OrderedDict()
            self.types[member.maj_type]['Instances'] = list()

        found = False

        for indices in range(len(self.types[member.maj_type]['Instances'])):
            inst = self.types[member.maj_type]['Instances'][indices]

            if inst.resp.request.path == member.resp.request.path:
                self.types[member.maj_type]['Instances'][indices] = member
                self.types[member.maj_type]['Instances'][indices].patches.\
                                    extend([patch for patch in inst.patches])

                found = True
                break

        if not found:
            self.types[member.maj_type]['Instances'].append(member)

    def load_from_dict(self, src):
        """Load data to monolith from dict

        :param src: data receive from rest operation.
        :type src: str.

        """
        self._type = src['Type']
        self._name = src['Name']
        self.types = OrderedDict()

        for typ in src['Types']:
            for inst in typ['Instances']:
                member = RisMonolithMember_v1_0_0(None)
                member.load_from_dict(inst)
                self.update_member(member)

        return

    def to_dict(self):
        """Convert data to monolith from dict"""
        result = OrderedDict()
        result['Type'] = self.type
        result['Name'] = self.name
        types_list = list()

        for typ in list(self.types.keys()):
            type_entry = OrderedDict()
            type_entry['Type'] = typ
            type_entry['Instances'] = list()

            for inst in self.types[typ]['Instances']:
                type_entry['Instances'].append(inst.to_dict())

            types_list.append(type_entry)

        result['Types'] = types_list
        return result

    def reduce(self):
        """Reduce monolith data"""
        result = OrderedDict()
        result['Type'] = self.type
        result['Name'] = self.name
        types_list = list()

        for typ in list(self.types.keys()):
            type_entry = OrderedDict()
            type_entry['Type'] = typ

            for inst in self.types[typ]['Instances']:
                type_entry['Instances'] = inst.reduce()

            types_list.append(type_entry)

        result['Types'] = types_list
        return result

    def _jsonpath2jsonpointer(self, instr):
        """Convert json path to json pointer

        :param instr: input path to be converted to pointer.
        :type instr: str.

        """
        outstr = instr.replace('.[', '[')
        outstr = outstr.replace('[', '/')
        outstr = outstr.replace(']', '/')

        if outstr.endswith('/'):
            outstr = outstr[:-1]

        return outstr

    def _get_current_location(self):
        """Return current location"""
        return self._current_location

    def _set_current_location(self, newval):
        """Set current location"""
        self._current_location = newval

    location = property(_get_current_location, _set_current_location)

    def list(self, lspath=None):
        """Function for list command

        :param lspath: path list.
        :type lspath: list.

        """
        results = list()
        path_parts = ['Types'] # Types is always assumed

        if isinstance(lspath, list) and len(lspath) > 0:
            lspath = lspath[0]
            path_parts.extend(lspath.split('/'))
        elif not lspath:
            lspath = '/'
        else:
            path_parts.extend(lspath.split('/'))

        currpos = self.to_dict()
        for path_part in path_parts:
            if not path_part:
                continue

            if isinstance(currpos, RisMonolithMember_v1_0_0):
                break
            elif isinstance(currpos, dict) and path_part in currpos:
                currpos = currpos[path_part]
            elif isinstance(currpos, list):
                for positem in currpos:
                    if 'Type' in positem and path_part == positem['Type']:
                        currpos = positem
                        break

        results.append(currpos)

        return results

    def killthreads(self):
        """Function to kill threads on logout"""
        threads = []
        for thread in threading.enumerate():
            if isinstance(thread, SuperDuperWorker):
                self.queue.put(('KILL', 'KILL', 'KILL', 'KILL'))
                threads.append(thread)

        for thread in threads:
            thread.join()
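
killthreads() above shuts its workers down by queueing one 'KILL' sentinel per SuperDuperWorker and then joining the threads. A self-contained sketch of that sentinel-shutdown pattern (the worker class and helper below are illustrative, not the library's own):

import threading
from queue import Queue

_KILL = object()  # sentinel; the class above uses the string 'KILL' instead

class QueueWorker(threading.Thread):
    def __init__(self, queue):
        super(QueueWorker, self).__init__()
        self.queue = queue
        self.daemon = True

    def run(self):
        while True:
            item = self.queue.get()
            try:
                if item is _KILL:
                    return
                # ... process `item` here ...
            finally:
                self.queue.task_done()

def kill_workers(queue, workers):
    # One poison pill per worker, then wait for each thread to exit.
    for _ in workers:
        queue.put(_KILL)
    for worker in workers:
        worker.join()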
Code example #34
File: auto_dataset.py Project: aetros/aetros-cli
def get_images(job_model, dataset, node, trainer):
    concurrent = 15

    from PIL import ImageFile
    if hasattr(ImageFile, 'LOAD_TRUNCATED_IMAGES'):
        ImageFile.LOAD_TRUNCATED_IMAGES = True


    q = Queue(concurrent)
    config = dataset['config']

    dir = job_model.get_dataset_downloads_dir(dataset)

    ensure_dir(dir)

    if 'classes' not in config or not config['classes']:
        trainer.logger.warning("Dataset %s does not contain any classes." % (dataset['id'],))
        return {
            'X_train': np.array([]),
            'Y_train': np.array([]),
            'X_test': np.array([]),
            'Y_test': np.array([])
        }

    classes = config['classes']

    trainer.set_status('LOAD IMAGES')

    max = 0
    images = {}

    dataset_path = job_model.get_dataset_downloads_dir(dataset)
    meta_information_file = dataset_path + '/meta.json'

    classes_changed = False
    config_changed = False
    had_previous = False
    classes_md5 = hashlib.md5(simplejson.dumps(classes, default=invalid_json_values, sort_keys=True).encode('utf-8')).hexdigest()

    validationFactor = 0.2

    meta = {}
    if os.path.isdir(dataset_path):
        if os.path.isfile(meta_information_file):
            with open(meta_information_file) as f:
                meta = simplejson.load(f)
                if meta:
                    had_previous = True
                    if 'classes_md5' in meta and meta['classes_md5'] != classes_md5:
                        classes_changed = True

                    trigger_changed = ['resize', 'resizeWidth', 'resizeHeight', 'resizeCompression']
                    for i in trigger_changed:
                        if i in meta['config'] and i in config and meta['config'][i] != config[i]:
                            config_changed = True
                else:
                    config_changed = True
        else:
            config_changed = True

    need_download = classes_changed or config_changed

    if need_download:

        if had_previous:
            trainer.logger.info("Reset dataset and re-download images to " + dir)
            if classes_changed:
                trainer.logger.info(" .. because classes changed in", meta['classes_md5'], classes_md5, meta_information_file)
            if config_changed:
                trainer.logger.info(" .. because settings changed in", meta_information_file)
        else:
            trainer.logger.info("Download images to " + dir)

        resize = bool(get_option(config, 'resize', True))
        if resize:
            resizeSize = (int(get_option(config, 'resizeWidth', 64)),
                          int(get_option(config, 'resizeHeight', 64)))
            trainer.logger.info(" .. with resizing to %dx%d " % resizeSize)

        # we need to download all images
        shutil.rmtree(dataset_path)

        controller = {'running': True}
        try:
            for category in classes:
                max += len(category['images'])

            progress = trainer.job_backend.create_progress('dataset-download-images', max)
            progress.label('Download dataset images')

            for i in range(concurrent):
                t = ImageDownloaderWorker(q, progress, dataset, max, images, controller)
                t.daemon = True
                t.start()

            for category_idx, category in enumerate(classes):
                for image in category['images']:
                    local_name = image['id']
                    local_path = '%s/%s' % (trainer.job_model.get_dataset_downloads_dir(dataset), local_name)
                    q.put([image, category_idx, local_path])

            q.join()
            controller['running'] = False

            def move_image(image, category='training'):
                if image['id'] in images and os.path.isfile(images[image['id']]):
                    target_path = dataset_path + \
                        '/%s/category_%s/%s' % (category, category_idx,
                                                os.path.basename(images[image['id']]))
                    ensure_dir(os.path.dirname(target_path))
                    os.rename(images[image['id']], target_path)

            for category_idx, category in enumerate(classes):
                random.shuffle(category['images'])
                position = int(math.ceil(len(category['images']) * validationFactor))

                ensure_dir(dataset_path + '/training')
                ensure_dir(dataset_path + '/validation')

                for image in category['images'][position:]:  # test data
                    if image['id'] in images and os.path.isfile(images[image['id']]):
                        move_image(image, 'training')

                for image in category['images'][:position]:  # validation data
                    if image['id'] in images and os.path.isfile(images[image['id']]):
                        move_image(image, 'validation')

            with open(meta_information_file, 'w') as f:
                meta = {
                    'loaded_at': classes_md5,
                    'classes_md5': classes_md5,
                    'config': config
                }
                simplejson.dump(meta, f, default=invalid_json_values)

        except KeyboardInterrupt:
            controller['running'] = False
            sys.exit(1)
    else:
        trainer.logger.info("Downloaded images up2date in " + dir)
        trainer.logger.info(" - Remove this directory if you want to re-download all images of your dataset and re-shuffle training/validation images.")

    trainer.output_size = len(classes)

    # change to type local_images
    dataset_transformed = dataset.copy()
    dataset_transformed['config']['path'] = dir

    all_memory = get_option(dataset['config'], 'allMemory', False, 'bool')

    if all_memory:
        return read_images_in_memory(job_model, dataset_transformed, node, trainer)
    else:
        return read_images_keras_generator(job_model, dataset_transformed, node, trainer)
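
The re-download decision above compares an MD5 of the class definitions and a few resize settings against what meta.json recorded on the previous run. A simplified, self-contained sketch of that check (stdlib json instead of simplejson, and without the invalid_json_values default handler):

import hashlib
import json

def needs_redownload(classes, config, meta):
    # Hash the class definitions the same way the code above does.
    classes_md5 = hashlib.md5(
        json.dumps(classes, sort_keys=True).encode('utf-8')).hexdigest()
    if not meta:
        return True
    if meta.get('classes_md5') != classes_md5:
        return True
    # Any change to a resize-related setting also triggers a re-download.
    for key in ('resize', 'resizeWidth', 'resizeHeight', 'resizeCompression'):
        if key in meta.get('config', {}) and key in config \
                and meta['config'][key] != config[key]:
            return True
    return False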
Code example #35
File: _bulk.py Project: vaibhavmule/pyfilesystem2
class Copier(object):
    """Copy files in worker threads."""
    def __init__(self, num_workers=4):
        # type: (int) -> None
        if num_workers < 0:
            raise ValueError("num_workers must be >= 0")
        self.num_workers = num_workers
        self.queue = None  # type: Optional[Queue[_Task]]
        self.workers = []  # type: List[_Worker]
        self.errors = []  # type: List[Exception]
        self.running = False

    def start(self):
        """Start the workers."""
        if self.num_workers:
            self.queue = Queue(maxsize=self.num_workers)
            self.workers = [_Worker(self) for _ in range(self.num_workers)]
            for worker in self.workers:
                worker.start()
        self.running = True

    def stop(self):
        """Stop the workers (will block until they are finished)."""
        if self.running and self.num_workers:
            for worker in self.workers:
                self.queue.put(None)
            for worker in self.workers:
                worker.join()
            # Free up references held by workers
            del self.workers[:]
            self.queue.join()
        self.running = False

    def add_error(self, error):
        """Add an exception raised by a task."""
        self.errors.append(error)

    def __enter__(self):
        self.start()
        return self

    def __exit__(
            self,
            exc_type,  # type: Optional[Type[BaseException]]
            exc_value,  # type: Optional[BaseException]
            traceback,  # type: Optional[TracebackType]
    ):
        self.stop()
        if traceback is None and self.errors:
            raise BulkCopyFailed(self.errors)

    def copy(self, src_fs, src_path, dst_fs, dst_path):
        # type: (FS, Text, FS, Text) -> None
        """Copy a file from one fs to another."""
        if self.queue is None:
            # This should be the most performant for a single-thread
            copy_file_internal(src_fs, src_path, dst_fs, dst_path)
        else:
            src_file = src_fs.openbin(src_path, "r")
            try:
                dst_file = dst_fs.openbin(dst_path, "w")
            except Exception:
                src_file.close()
                raise
            task = _CopyTask(src_file, dst_file)
            self.queue.put(task)
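
Since Copier implements __enter__/__exit__, copies can be queued inside a `with` block and the workers are drained, joined and error-checked on exit. A usage sketch, assuming a recent pyfilesystem2 installation so that open_fs and the rest of the fs._bulk helpers are importable:

from fs import open_fs

src_fs = open_fs("mem://")
dst_fs = open_fs("mem://")
src_fs.writetext("/hello.txt", "hello world")

# Queue the copy; BulkCopyFailed is raised on exit if any worker errored.
with Copier(num_workers=2) as copier:
    copier.copy(src_fs, "/hello.txt", dst_fs, "/hello.txt")

assert dst_fs.readtext("/hello.txt") == "hello world"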
Code example #36
class Rigger(object):
    """ A Rigger event framework instance.

    The Rigger object holds all configuration and instances of plugins. By default Rigger accepts
    a configuration file name to parse, though it is perfectly acceptable to pass the configuration
    into the ``self.config`` attribute.

    Args:
        config_file: A configuration file holding all of Riggers base and plugin configuration.
    """
    def __init__(self, config_file):
        self.gdl = threading.Lock()
        self.pre_callbacks = defaultdict(dict)
        self.post_callbacks = defaultdict(dict)
        self.plugins = {}
        self.config_file = config_file
        self.squash_exceptions = False
        self.initialized = False
        self._task_list = {}
        self._queue_lock = threading.Lock()
        self._global_queue = Queue()
        self._background_queue = Queue()
        self._server_shutdown = False
        self._zmq_event_handler_shutdown = False
        self._global_queue_shutdown = False
        self._background_queue_shutdown = False

        globt = threading.Thread(target=self.process_queue,
                                 name="global_queue_processor")
        globt.start()
        bgt = threading.Thread(target=self.process_background_queue,
                               name="background_queue_processor")
        bgt.start()

    def process_queue(self):
        """
        The ``process_queue`` thread manages taking events on and off of the global queue.
        Both TCP and in-object fire_hooks place events onto the global_queue and these are both
        handled by the same handler called ``process_hook``. If there is an exception during
        processing, the exception is printed and execution continues.
        """
        while not self._global_queue_shutdown:
            while not self._global_queue.empty():
                with self._queue_lock:
                    tid = self._global_queue.get()
                    obj = self._task_list[tid].json_dict
                    self._task_list[tid].status = Task.RUNNING
                try:
                    loc, glo = self.process_hook(obj['hook_name'],
                                                 **obj['data'])
                    combined_dict = {}
                    combined_dict.update(glo)
                    combined_dict.update(loc)
                    self._task_list[tid].output = combined_dict
                except Exception as e:
                    self.log_message(e)
                with self._queue_lock:
                    self._global_queue.task_done()
                    self._task_list[tid].status = Task.FINISHED
                if not self._task_list[tid].json_dict.get('grab_result', None):
                    del self._task_list[tid]
            time.sleep(0.1)

    def process_background_queue(self):
        """
        The ``process_background_queue`` manages the hooks which have been backgrounded. In this
        respect the tasks that are completed are not required to continue with the test and as such
        can be forgotten about. An example of this would be something that sends an email, or tars up
        files, it has all the information it needs and the main process doesn't need to wait for it
        to complete.
        """
        while not self._background_queue_shutdown:
            while not self._background_queue.empty():
                obj = self._background_queue.get()
                try:
                    local, globals_updates = self.process_callbacks(
                        obj['cb'], obj['kwargs'])
                    with self.gdl:
                        self.global_data = recursive_update(
                            self.global_data, globals_updates)
                except Exception as e:
                    self.log_message(e)
                self._background_queue.task_done()
            time.sleep(0.1)

    def zmq_event_handler(self, zmq_socket_address):
        """
        The ``zmq_event_handler`` thread receives (and responds to) updates from the
        zmq socket, which is normally embedded in the web server running alongside this
        riggerlib instance, in its own process.

        """
        ctx = zmq.Context()
        zmq_socket = ctx.socket(zmq.REP)
        zmq_socket.set(zmq.RCVTIMEO, 300)
        zmq_socket.bind(zmq_socket_address)

        def zmq_reply(message, **extra):
            payload = {'message': message}
            payload.update(extra)
            zmq_socket.send_json(payload)

        bad_request = partial(zmq_reply, 'BAD REQUEST')

        while not self._zmq_event_handler_shutdown:
            try:
                json_dict = zmq_socket.recv_json()
            except zmq.Again:
                continue

            try:
                event_name = json_dict['event_name']
            except KeyError:
                bad_request()
                continue

            if event_name == 'fire_hook':
                tid = self._fire_internal_hook(json_dict)
                if tid:
                    zmq_reply('OK', tid=tid)
                else:
                    bad_request()
            elif event_name == 'task_check':
                try:
                    tid = json_dict['tid']
                    extra = {
                        "tid": tid,
                        "status": self._task_list[tid].status,
                    }
                    if json_dict['grab_result']:
                        extra["output"] = self._task_list[tid].output
                    zmq_reply('OK', **extra)
                except KeyError:
                    zmq_reply('NOT FOUND')
            elif event_name == 'task_delete':
                try:
                    tid = json_dict['tid']
                    del self._task_list[tid]
                    zmq_reply('OK', tid=tid)
                except KeyError:
                    zmq_reply('OK', tid=tid)
            elif event_name == 'shutdown':
                zmq_reply('OK')
                # We gotta initiate server stop from here and stop this thread
                self._server_shutdown = True
                break
            elif event_name == 'ping':
                zmq_reply('PONG')
            else:
                bad_request()

        zmq_socket.close()

    def read_config(self, config_file):
        """
        Reads in the config file and parses the yaml data.

        Args:
            config_file: A configuration file holding all of Riggers base and plugin configuration.

        Raises:
            IOError: If the file can not be read.
            Exception: If there is any error parsing the configuration file.
        """
        try:
            with open(config_file, "r") as stream:
                data = yaml.load(stream)
        except IOError:
            print("!!! Configuration file could not be loaded...exiting")
            sys.exit(127)
        except Exception as e:
            print(e)
            print("!!! Error parsing Configuration file")
            sys.exit(127)
        self.config = data

    def parse_config(self):
        """
        Takes the configuration data from ``self.config`` and sets up the plugin instances.
        """
        self.read_config(self.config_file)
        self.setup_plugin_instances()
        self.start_server()

    def setup_plugin_instances(self):
        """
        Sets up instances into a dict called ``self.instances`` and instantiates each
        instance of the plugin. It also sets the ``self._threaded`` option to determine
        if plugins will be processed synchronously or asynchronously.
        """
        self.instances = {}
        self._threaded = self.config.get("threaded", False)
        plugins = self.config.get("plugins", {})
        for ident, config in plugins.items():
            self.setup_instance(ident, config)

    def setup_instance(self, ident, config):
        """
        Sets up a single instance into the ``self.instances`` dict. If the instance does
        not exist, a warning is printed out.

        Args:
            ident: A plugin instance identifier.
            config: Configuration dict from the yaml.
        """
        plugin_name = config.get('plugin', {})
        if plugin_name in self.plugins:
            obj = self.plugins[plugin_name]
            if obj:
                obj_instance = obj(ident, config, self)
                self.instances[ident] = RiggerPluginInstance(
                    ident, obj_instance, config)
        else:
            msg = "Plugin [{}] was not found, "\
                  "disabling instance [{}]".format(plugin_name, ident)
            self.log_message(msg)

    def start_server(self):
        """
        Starts the ZMQ server if the ``server_enabled`` is True in the config.
        """
        self._server_hostname = self.config.get('server_address', '127.0.0.1')
        self._server_port = self.config.get('server_port', 21212)
        self._server_enable = self.config.get('server_enabled', False)
        if self._server_enable:
            zmq_socket_address = 'tcp://{}:{}'.format(self._server_hostname,
                                                      self._server_port)
            # set up receiver thread for zmq event handling
            zeh = threading.Thread(target=self.zmq_event_handler,
                                   args=(zmq_socket_address, ),
                                   name="zmq_event_handler")
            zeh.start()
            exect = threading.Thread(target=self.await_shutdown,
                                     name="executioner")
            exect.start()

    def await_shutdown(self):
        while not self._server_shutdown:
            time.sleep(0.3)
        self.stop_server()

    def stop_server(self):
        """
        Responsible for the following:
            - stopping the zmq event handler (unless already stopped through 'terminate')
            - stopping the global queue
            - stopping the background queue
        """
        self.log_message("Shutdown initiated : {}".format(
            self._server_hostname))
        # The order here is important
        self._zmq_event_handler_shutdown = True
        self._global_queue.join()
        self._global_queue_shutdown = True
        self._background_queue.join()
        self._background_queue_shutdown = True
        raise SystemExit

    def fire_hook(self, hook_name, **kwargs):
        """
        Parses the hook information into a dict for passing to process_hook. This is used
        to enable both the TCP and in-object fire_hook methods to use the same process_hook
        method call.

        Args:
            hook_name: The name of the hook to fire.
            kwargs: The kwargs to pass to the hooks.

        """
        json_dict = {'hook_name': hook_name, 'data': kwargs}
        self._fire_internal_hook(json_dict)

    def _fire_internal_hook(self, json_dict):
        task = Task(json_dict)
        tid = task.tid.hexdigest()
        self._task_list[tid] = task
        if self._global_queue:
            with self._queue_lock:
                self._global_queue.put(tid)
            return tid
        else:
            return None

    def process_hook(self, hook_name, **kwargs):
        """
        Takes a hook_name and a selection of kwargs and fires off the appropriate callbacks.

        This function is the guts of Rigger and is responsible for running the callback and
        hook functions. It first loads some blank dicts to collect the updates for the local
        and global namespaces. After this, it loads the pre_callback functions along with
        the kwargs into the callback collector processor.

        The return values are then classified into local and global dicts and updates proceed.
        After this, the plugin hooks themselves are then run using the same methodology. Their
        return values are merged with the existing dicts and then the same process happens
        for the post_callbacks.

        Note: If the instance of the plugin has been marked as a background instance, any hooks
              which are called in that instance will be backgrounded. The hook will also not
              be able to return any data to the post-hook callback, although updates to globals
              will be processed as and when the backgrounded task is completed.

        Args:
            hook_name: The name of the hook to fire.
            kwargs: The kwargs to pass to the hooks.
        """
        if not self.initialized:
            return
        kwargs_updates = {}
        globals_updates = {}
        kwargs.update({'config': self.config})

        # First fire off any pre-hook callbacks
        if self.pre_callbacks.get(hook_name):
            # print "Running pre hook callback for {}".format(hook_name)
            kwargs_updates, globals_updates = self.process_callbacks(
                self.pre_callbacks[hook_name].values(), kwargs)

            # Now we can update the kwargs passed to the real hook with the updates
            with self.gdl:
                self.global_data = recursive_update(self.global_data,
                                                    globals_updates)
            kwargs = recursive_update(kwargs, kwargs_updates)

        # Now fire off each plugin hook
        event_hooks = []
        for instance_name, instance in self.instances.items():
            callbacks = instance.obj.callbacks
            enabled = instance.data.get('enabled', None)
            if callbacks.get(hook_name) and enabled:
                cb = callbacks[hook_name]
                if instance.data.get('background', False):
                    self._background_queue.put({'cb': [cb], 'kwargs': kwargs})
                elif cb['bg']:
                    self._background_queue.put({'cb': [cb], 'kwargs': kwargs})
                else:
                    event_hooks.append(cb)
        kwargs_updates, globals_updates = self.process_callbacks(
            event_hooks, kwargs)

        # One more update for the post_hook callback
        with self.gdl:
            self.global_data = recursive_update(self.global_data,
                                                globals_updates)
        kwargs = recursive_update(kwargs, kwargs_updates)

        # Finally any post-hook callbacks
        if self.post_callbacks.get(hook_name):
            # print "Running post hook callback for {}".format(hook_name)
            kwargs_updates, globals_updates = self.process_callbacks(
                self.post_callbacks[hook_name].values(), kwargs)
            with self.gdl:
                self.global_data = recursive_update(self.global_data,
                                                    globals_updates)
            kwargs = recursive_update(kwargs, kwargs_updates)
        return kwargs, self.global_data

    def process_callbacks(self, callback_collection, kwargs):
        """
        Processes a collection of callbacks or hooks for a particular event, namely pre, hook or
        post.

        The functions are passed in as an array to ``callback_collection`` and process callbacks
        first iterates each function and ensures that each one has the correct arguments available
        to it. If not, an Exception is raised. Then, depending on whether Threading is enabled or
        not, the functions are either run sequentially, or loaded into a ThreadPool and executed
        asynchronously.

        The returned local and global updates are either collected and processed sequentially, as
        in the case of the non-threaded behaviour, or collected at the end of the
        callback_collection processing and handled there.

        Note:
            It is impossible to predict the order of the functions being run. If the order is
            important, it is advised to create a second event hook that will be fired before the
            other. Rigger has no concept of hook or callback order and is unlikely ever to have one.

        Args:
            callback_collection: A list of functions to call.
            kwargs: A set of kwargs to pass to the functions.

        Returns: A tuple of local and global namespace updates.
        """
        loc_collect = {}
        glo_collect = {}
        if self._threaded:
            results_list = []
            pool = ThreadPool(10)
        for cb in callback_collection:
            required_args = [
                sig for sig in cb['args']
                if isinstance(cb['args'][sig].default, type)
            ]
            missing = list(
                set(required_args).difference(set(
                    self.global_data.keys())).difference(set(kwargs.keys())))
            if not missing:
                new_kwargs = self.build_kwargs(cb['args'], kwargs)
                if self._threaded:
                    results_list.append(
                        pool.apply_async(cb['func'], [], new_kwargs))
                else:
                    obtain_result = self.handle_results(
                        cb['func'], [], new_kwargs)
                    loc_collect, glo_collect = self.handle_collects(
                        obtain_result, loc_collect, glo_collect)
            else:
                raise Exception('Function {} is missing kwargs {}'.format(
                    cb['func'].__name__, missing))

        if self._threaded:
            pool.close()
            pool.join()
            for result in results_list:
                obtain_result = self.handle_results(result.get, [], {})
                loc_collect, glo_collect = self.handle_collects(
                    obtain_result, loc_collect, glo_collect)
        return loc_collect, glo_collect

    def handle_results(self, call, args, kwargs):
        """
        Handles results and depending on configuration, squashes exceptions and logs or
        returns the obtained result.

        Args:
            call: The function call.
            args: The positional arguments.
            kwargs: The keyword arguments.

        Returns: The obtained result of the callback or hook.
        """
        try:
            obtain_result = call(*args, **kwargs)
        except:
            if self.squash_exceptions:
                obtain_result = None
                self.handle_failure(sys.exc_info())
            else:
                raise

        return obtain_result

    def handle_collects(self, result, loc_collect, glo_collect):
        """
        Handles extracting the information from the hook/callback result.

        If the hook/callback returns None, then the dicts are returned unaltered, else
        they are updated with local, global namespace updates.

        Args:
            result: The result to process.
            loc_collect: The local namespace updates collection.
            glo_collect: The global namespace updates collection.
        Returns: A tuple containing the local and global updates.
        """
        if result:
            if result[0]:
                loc_collect = recursive_update(loc_collect, result[0])
            if result[1]:
                glo_collect = recursive_update(glo_collect, result[1])
        return loc_collect, glo_collect

    def build_kwargs(self, args, kwargs):
        """
        Builds a new kwargs from a list of allowed args.

        Functions only receive a single set of kwargs, and so the global and local namespaces
        have to be collapsed. In this way, the local overrides the global namespace, hence if
        a key exists in both local and global, the local value will be passed to the function
        under the key name and the global value will be forgotten.

        The args parameter ensures that only the expected arguments are supplied.

        Args:
            args: A list of allowed argument names
            kwargs: A dict of kwargs from the local namespace.
        Returns: A consolidated global/local namespace with local overrides.
        """
        returned_args = {}
        returned_args.update({
            name: self.global_data[name]
            for name in args if name in self.global_data
        })
        returned_args.update(
            {name: kwargs[name]
             for name in args if name in kwargs})
        return returned_args
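
    # Illustrative example (made-up values): with self.global_data == {'a': 1, 'b': 2}
    # and kwargs == {'b': 3}, build_kwargs(['a', 'b'], kwargs) returns {'a': 1, 'b': 3};
    # 'a' is filled from the global namespace and the local 'b' overrides the global one.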

    def register_hook_callback(self,
                               hook_name=None,
                               ctype="pre",
                               callback=None,
                               name=None):
        """
        Registers pre and post callbacks.

        Takes a callback function and assigns it to the hook_name with an optional identifier.
        The optional identifier makes it possible to hot bind functions into hooks and to
        remove them at a later date with ``unregister_hook_callback``.

        Args:
            hook_name: The name of the event hook to respond to.
            ctype: The call back type, either ``pre`` or ``post``.
            callback: The callback function.
            name: An optional name for the callback instance binding.
        """
        if hook_name and callback:
            callback_instance = self.create_callback(callback)
            if not name:
                name = hashlib.sha1(
                    str(time.time()) + hook_name +
                    str(callback_instance['args'])).hexdigest()
            if ctype == "pre":
                self.pre_callbacks[hook_name][name] = callback_instance
            elif ctype == "post":
                self.post_callbacks[hook_name][name] = callback_instance

    def unregister_hook_callback(self, hook_name, ctype, name):
        """
        Unregisters a pre or post callback.

        If the binding has a known name, this function allows the removal of a binding.

        Args:
            hook_name: The event hook name.
            ctype: The callback type, either ``pre`` or ``post``.
            name: An optional name for the callback instance binding.
        """
        if ctype == "pre":
            del self.pre_callbacks[hook_name][name]
        elif ctype == "post":
            del self.post_callbacks[hook_name][name]

    def register_plugin(self, cls, plugin_name=None):
        """ Registers a plugin class to a name.

        Multiple instances of the same plugin can be used in Rigger; ``self.plugins``
        stores un-initialized class definitions to be used by ``setup_instances``.

        Args:
            cls: The class.
            plugin_name: The name of the plugin.
        """
        if plugin_name in self.plugins:
            print("Plugin name already taken [{}]".format(plugin_name))
        elif plugin_name is None:
            print("Plugin name cannot be None")
        else:
            # print "Registering plugin {}".format(plugin_name)
            self.plugins[plugin_name] = cls

    def get_instance_obj(self, name):
        """
        Gets the instance object for a given ident name.

        Args:
            name: The ident name of the instance.
        Returns: The object of the instance.
        """
        if name in self.instances:
            return self.instances[name].obj
        else:
            return None

    def get_instance_data(self, name):
        """
        Gets the instance data(config) for a given ident name.

        Args:
            name: The ident name of the instance.
        Returns: The data(config) of the instance.
        """
        if name in self.instances:
            return self.instances[name].data
        else:
            return None

    def configure_plugin(self, name, *args, **kwargs):
        """
        Attempts to configure an instance, passing it the args and kwargs.

        Args:
            name: The ident name of the instance.
            args: The positional args.
            kwargs: The keyword arguments.
        """
        obj = self.get_instance_obj(name)
        if obj:
            obj.configure(*args, **kwargs)

    @staticmethod
    def create_callback(callback, bg=False):
        """
        Simple function to inspect a function and return it along with its param names wrapped
        up in a nice dict. This forms a callback object.

        Args:
            callback: The callback function.
            bg: Whether the callback should be run as a background task.
        Returns: A dict of function and param names.
        """
        params = signature(callback).parameters
        return {'func': callback, 'args': params, 'bg': bg}

    def handle_failure(self, exc):
        """
        Handles an exception. It is expected that this be overridden.
        """
        self.log_message(exc)

    def log_message(self, message):
        """
        "Logs" a message. It is expected that this be overidden.
        """
        print(message)
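
A minimal usage sketch for the Rigger class above. The config file name, hook name, and callback are illustrative, and because parts of initialisation are not shown in this listing, the sketch sets ``global_data`` and ``initialized`` directly rather than relying on the library's own startup path.

def announce(config, test_name='unknown'):
    # A callback may return a (local_updates, global_updates) tuple; both dicts
    # are folded back in by handle_collects/recursive_update above.
    print('starting {}'.format(test_name))
    return {'announced': True}, {'last_test': test_name}

rig = Rigger('rigger_config.yaml')    # hypothetical config file name
rig.parse_config()
rig.global_data = {}                  # not initialised in the portion shown here
rig.initialized = True                # process_hook is a no-op until this is set
rig.register_hook_callback(hook_name='start_test', ctype='pre',
                           callback=announce, name='announce_cb')
rig.fire_hook('start_test', test_name='smoke')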
Code example #37
class RisMonolith(Dictable):
    """Monolithic cache of RIS data. This takes a :class:`redfish.rest.v1.RestClient` and uses it to
    gather data from a server and saves it in a modifiable database called monolith.

    :param client: client to use for data retrieval. Client is saved as a weakref, using it requires
                   parenthesis and will not survive if the client used in init is removed.
    :type client: :class:`redfish.rest.v1.RestClient`
    :param typepath: The compatibility class to use for differentiating between Redfish/LegacyRest.
    :type typepath: :class:`redfish.rest.ris.Typesandpathdefines`
    :param directory_load: The flag to quick-load using the resource directory if possible.
           When set to True this will load paths, etags, and types, but not create responses for
           every monolith member. When responses are needed, they will need to be loaded separately.
    :type directory_load: bool
    """
    def __init__(self, client, typepath, directory_load=True):
        self._client = weakref.ref(client)
        self.name = "Monolithic output of RIS Service"
        self._visited_urls = list()
        self._type = None
        self._name = None
        self.progress = 0
        self._directory_load = directory_load
        self.is_redfish = self._client().is_redfish
        self.typesadded = defaultdict(set)
        self.paths = dict()
        self.ctree = defaultdict(set)
        self.colltypes = defaultdict(set)

        self.typepath = typepath
        self.collstr = self.typepath.defs.collectionstring
        self.etagstr = 'ETag'
        if self.is_redfish:
            self._resourcedir = '/redfish/v1/ResourceDirectory/'
        else:
            self._resourcedir = '/rest/v1/ResourceDirectory'

        #MultiThreading
        self.get_queue = Queue()
        self.threads = []

    @property
    def directory_load(self):
        """The flag to gather information about a tree without downloading every path. Only usable
        on HPE systems with a ResourceDirectory type."""
        return self._directory_load

    @directory_load.setter
    def directory_load(self, dir_load):
        """Set the directory_load flag"""
        self._directory_load = dir_load

    @property
    def type(self):
        """Return monolith version type"""
        return "Monolith.1.0.0"

    @property
    def visited_urls(self):
        """The urls visited by the monolith"""
        return list(set(self._visited_urls) | set(self.paths.keys()))

    @visited_urls.setter
    def visited_urls(self, visited_urls):
        """Set visited URLS."""
        self._visited_urls = visited_urls

    @property
    def types(self):
        """Returns list of types for members in the monolith

        :rtype: list
        """
        return list(self.typesadded.keys())

    @types.setter
    def types(self, member):
        """Adds a member to monolith

        :param member: Member created based on response.
        :type member: RisMonolithMemberv100
        """
        self.typesadded[member.maj_type].add(member.path)
        patches = []
        if member.path in list(self.paths.keys()):
            patches = self.paths[member.path].patches
        self.paths[member.path] = member
        self.paths[member.path].patches.extend([patch for patch in patches])

    def path(self, path):
        """Provides the member corresponding to the path specified. Case sensitive.

        :param path: path of the monolith member to return
        :type path: str
        :rtype: RisMonolithMemberv100
        """
        try:
            return self.paths[path]
        except:
            return None

    def iter(self, typeval=None):
        """An iterator that yields each member of monolith associated with a specific type. In the
        case that no type is included this will yield all members in the monolith.

        :rtype: RisMonolithMemberv100
        """
        if not typeval:
            for _, val in self.paths.items():
                yield val
        else:
            for typename in self.gettypename(typeval):
                for item in self.typesadded[typename]:
                    yield self.paths[item]
#             types = next(self.gettypename(typeval), None)
#             if types in self.typesadded:
#                 for item in self.typesadded[types]:
#                     yield self.paths[item]
#             else:
#                 raise RisInstanceNotFoundError("Unable to locate instance for" \
#                                                             " '%s'" % typeval)

    def itertype(self, typeval):
        """Iterator that yields member(s) of given type in the monolith and raises an error if no
        member of that type is found.

        :param typeval: type name of the requested member.
        :type typeval: str
        :rtype: RisMonolithMemberv100
        """
        typeiter = self.gettypename(typeval)
        types = next(typeiter, None)
        if types:
            while types:
                for item in self.typesadded[types]:
                    yield self.paths[item]
                types = next(typeiter, None)
        else:
            raise RisInstanceNotFoundError(
                "Unable to locate instance for '%s'" % typeval)

    def typecheck(self, types):
        """Check if a member of given type exists in the monolith

        :param types: type to check.
        :type types: str
        :rtype: bool
        """
        if any(types in val for val in self.types):
            return True
        return False

    def gettypename(self, types):
        """Takes a full type response and returns all major types associated.
        Example: #Type.v1_0_0.Type will return iter(Type.1)

        :param types: The type of the requested response.
        :type types: str
        :rtype: iter of major types
        """
        types = types[1:] if types[0] in ("#", u"#") else types
        return iter(
            (xt for xt in self.types if xt and types.lower() in xt.lower()))

    def update_member(self, member=None, resp=None, path=None, init=True):
        """Adds member to the monolith. If the member already exists the
        data is updated in place. Takes either a RisMonolithMemberv100 instance or a
        :class:`redfish.rest.containers.RestResponse` along with that response's path.

        :param member: The monolith member to add to the monolith.
        :type member: RisMonolithMemberv100
        :param resp: The rest response to add to the monolith.
        :type resp: :class:`redfish.rest.containers.RestResponse`
        :param path: The path correlating to the response.
        :type path: str
        :param init: Flag if addition is part of the initial load. Set this to false if you are
                     calling this by itself.
        :type init: bool
        """
        if not member and resp and path:
            self._visited_urls.append(path.lower())

            member = RisMonolithMemberv100(resp, self.is_redfish)
            if not member:  #Assuming for lack of member and not member.type
                return
            if not member.type:
                member.deftype = 'object'  #Hack for general schema with no type

        self.types = member

        if init:
            self.progress += 1
            if LOGGER.getEffectiveLevel() == 40:
                self._update_progress()

    def load(self, path=None, includelogs=False, init=False, \
            crawl=True, loadtype='href', loadcomplete=False, path_refresh=False):
        """Walks the entire data model and caches all responses or loads an individual path into
        the monolith. Supports both threaded and sequential crawling.

        :param path: The path to start the crawl from if crawling, or the path to load into the
                     monolith. If path is not included, the crawl will start with the default,
                     which is */redfish/v1/* or */rest/v1* depending on whether the system is
                     Redfish or LegacyRest.
        :type path: str.
        :param includelogs: Flag to determine if logs should be downloaded as well in the crawl.
        :type includelogs: bool
        :param init: Flag to determine if this is the initial load.
        :type init: bool
        :param crawl: Flag to determine if load should crawl through found links.
        :type crawl: bool
        :param loadtype: Flag to determine if loading standard links: *href* or schema links: *ref*.
        :type loadtype: str.
        :param loadcomplete: Flag to download the entire data model including registries and
                             schemas.
        :type loadcomplete: bool
        :param path_refresh: Flag to reload the path specified, clearing any patches and overwriting the
                    current data in the monolith.
        :type path_refresh: bool
        """
        if init:
            if LOGGER.getEffectiveLevel() == 40:
                sys.stdout.write("Discovering data...")
            else:
                LOGGER.info("Discovering data...")
            self.name = self.name + ' at %s' % self._client().base_url

        selectivepath = path
        if not selectivepath:
            selectivepath = self._client().default_prefix
        if loadtype == 'href' and not self._client().base_url.startswith(
                "blobstore://."):
            if not self.threads:
                for _ in range(6):
                    workhand = LoadWorker(self.get_queue)
                    workhand.setDaemon(True)
                    workhand.start()
                    self.threads.append(workhand)

            self.get_queue.put((selectivepath, includelogs, loadcomplete, crawl, \
                                   path_refresh, init, None, None, self))
            self.get_queue.join()

            #Raise any errors from threads, and set them back to None after
            excp = None
            for thread in self.threads:
                if excp is None:
                    excp = thread.get_exception()
                thread.exception = None

            if excp:
                raise excp

            #self.member_queue.join()
        else:
            #We can't load ref or local client in a threaded manner
            self._load(selectivepath, originaluri=None, crawl=crawl, \
                       includelogs=includelogs, init=init, loadtype=loadtype, \
                       loadcomplete=loadcomplete, path_refresh=path_refresh,
                       prevpath=None)

        if init:
            if LOGGER.getEffectiveLevel() == 40:
                sys.stdout.write("Done\n")
            else:
                LOGGER.info("Done\n")
        if self.directory_load and init:
            self._populatecollections()

    def _load(self, path, crawl=True, originaluri=None, includelogs=False,\
                        init=True, loadtype='href', loadcomplete=False, \
                                                path_refresh=False, prevpath=None):
        """Sequential version of loading monolith and parsing schemas.

        :param path: path to start load from.
        :type path: str
        :param crawl: flag to determine if load should traverse found links.
        :type crawl: bool
        :param originaluri: variable to assist in determining originating path.
        :type originaluri: str
        :param includelogs: flag to determine if logs should be downloaded also.
        :type includelogs: bool
        :param init: flag to determine if first run of load.
        :type init: bool
        :param loadtype: flag to determine if load is meant for only href items.
        :type loadtype: str.
        :param loadcomplete: flag to download the entire monolith
        :type loadcomplete: bool
        :param path_refresh: flag to reload the members in the monolith instead of skipping them if they exist.
        :type path_refresh: bool
        """

        if path.endswith("?page=1") and not loadcomplete:
            #Don't download schemas in crawl unless we are loading absolutely everything
            return
        elif not includelogs and crawl:
            #Only include logs when asked as there can be an extreme amount of entries
            if "/log" in path.lower():
                return

        #TODO: need to find a better way to support non ascii characters
        path = path.replace("|", "%7C")
        #remove fragments
        newpath = urlparse(path)
        newpath = list(newpath[:])
        newpath[-1] = ''
        path = urlunparse(tuple(newpath))

        if prevpath and prevpath != path:
            self.ctree[prevpath].update([path])
        if not path_refresh:
            if path.lower() in self.visited_urls:
                return
        LOGGER.debug('_loading %s', path)

        resp = self._client().get(path)

        if resp.status != 200 and path.lower() == self.typepath.defs.biospath:
            raise BiosUnregisteredError()
        elif resp.status == 401:
            raise SessionExpired("Invalid session. Please logout and "\
                                    "log back in or include credentials.")
        elif resp.status not in (201, 200):
            self.removepath(path)
            return

        if loadtype == "ref":
            try:
                if resp.status in (201, 200):
                    self.update_member(resp=resp, path=path, init=init)
                self._parse_schema(resp)
            except jsonpointer.JsonPointerException:
                raise SchemaValidationError()

        self.update_member(resp=resp, path=path, init=init)

        fpath = lambda pa, path: path if pa.endswith(self.typepath.defs.hrefstring) and \
            pa.startswith((self.collstr, 'Entries')) else None

        if loadtype == 'href':
            #follow all the href attributes
            if self.is_redfish:
                jsonpath_expr = jsonpath_rw.parse("$..'@odata.id'")
            else:
                jsonpath_expr = jsonpath_rw.parse('$..href')
            matches = jsonpath_expr.find(resp.dict)

            if 'links' in resp.dict and 'NextPage' in resp.dict['links']:
                if originaluri:
                    next_link_uri = originaluri + '?page=' + \
                                    str(resp.dict['links']['NextPage']['page'])
                    href = '%s' % next_link_uri

                    self._load(href, originaluri=originaluri, \
                               includelogs=includelogs, crawl=crawl, \
                               init=init, prevpath=None, loadcomplete=loadcomplete)
                else:
                    next_link_uri = path + '?page=' + str(
                        resp.dict['links']['NextPage']['page'])

                    href = '%s' % next_link_uri
                    self._load(href, originaluri=path, includelogs=includelogs,\
                        crawl=crawl, init=init, prevpath=None, loadcomplete=loadcomplete)

            #Only use monolith if we are set to
            matchrdirpath = next((match for match in matches if match.value == \
                                        self._resourcedir), None) if self.directory_load else None
            if not matchrdirpath and crawl:
                for match in matches:
                    if path == "/rest/v1" and not loadcomplete:
                        if str(match.full_path) == "links.Schemas.href" or \
                                str(match.full_path) == "links.Registries.href":
                            continue
                    elif not loadcomplete:
                        if str(match.full_path) == "*****@*****.**" or \
                                str(match.full_path) == "*****@*****.**":
                            continue

                    if match.value == path:
                        continue
                    elif not isinstance(match.value, six.string_types):
                        continue

                    href = '%s' % match.value
                    self._load(href, crawl=crawl, \
                       originaluri=originaluri, includelogs=includelogs, \
                       init=init, prevpath=fpath(str(match.full_path), path), \
                       loadcomplete=loadcomplete)
            elif crawl:
                href = '%s' % matchrdirpath.value
                self._load(href, crawl=crawl, originaluri=originaluri, \
                    includelogs=includelogs, init=init, prevpath=path, loadcomplete=loadcomplete)
            if loadcomplete:
                if path == '/rest/v1':
                    schemamatch = jsonpath_rw.parse('$..extref')
                else:
                    schemamatch = jsonpath_rw.parse('$..Uri')
                smatches = schemamatch.find(resp.dict)
                matches = matches + smatches
                for match in matches:
                    if isinstance(match.value, six.string_types):
                        self._load(match.value, crawl=crawl, originaluri=originaluri,\
                        includelogs=includelogs, init=init, loadcomplete=loadcomplete,\
                                     prevpath=fpath(str(match.full_path), path))

    def _parse_schema(self, resp):
        """Function to get and replace schema $ref with data

        :param resp: response data containing ref items.
        :type resp: str
        """
        #pylint: disable=maybe-no-member
        if not self.typepath.gencompany:
            return self._parse_schema_gen(resp)
        jsonpath_expr = jsonpath_rw.parse('$.."$ref"')
        matches = jsonpath_expr.find(resp.dict)
        respcopy = resp.dict
        typeregex = '([#,@].*?\.)'
        if matches:
            for match in matches:
                fullpath = str(match.full_path)
                jsonfile = match.value.split('#')[0]
                jsonpath = match.value.split('#')[1]
                listmatch = None
                found = None

                if 'redfish.dmtf.org' in jsonfile:
                    if 'odata' in jsonfile:
                        jsonpath = jsonpath.replace(jsonpath.split('/')[-1], \
                                            'odata' + jsonpath.split('/')[-1])
                    jsonfile = 'Resource.json'

                found = re.search(typeregex, fullpath)
                if found:
                    repitem = fullpath[found.regs[0][0]:found.regs[0][1]]
                    schemapath = '/' + fullpath.replace(repitem, '~').\
                                        replace('.', '/').replace('~', repitem)
                else:
                    schemapath = '/' + fullpath.replace('.', '/')

                if '.json' in jsonfile:
                    itempath = schemapath

                    if self.is_redfish:
                        if resp.request.path[-1] == '/':
                            newpath = '/'.join(resp.request.path.split('/')\
                                                [:-2]) + '/' + jsonfile + '/'
                        else:
                            newpath = '/'.join(resp.request.path.split('/')\
                                                [:-1]) + '/' + jsonfile + '/'
                    else:
                        newpath = '/'.join(
                            resp.request.path.split('/')[:-1]) + '/' + jsonfile

                    if 'href.json' in newpath:
                        continue

                    if newpath.lower() not in self.visited_urls:
                        self.load(newpath, crawl=False, includelogs=False, \
                                                init=False, loadtype='ref')

                    instance = list()

                    #deprecated type "string" for Type.json
                    if 'string' in self.types:
                        for item in self.iter('string'):
                            instance.append(item)
                    if 'object' in self.types:
                        for item in self.iter('object'):
                            instance.append(item)

                    for item in instance:
                        if jsonfile in item.path:
                            if 'anyOf' in fullpath:
                                break

                            dictcopy = item.dict
                            listmatch = re.search('[[][0-9]+[]]', itempath)

                            if listmatch:
                                start = listmatch.regs[0][0]
                                end = listmatch.regs[0][1]

                                newitempath = [
                                    itempath[:start], itempath[end:]
                                ]
                                start = jsonpointer.JsonPointer(newitempath[0])
                                end = jsonpointer.JsonPointer(newitempath[1])

                                del start.parts[-1], end.parts[-1]
                                vals = start.resolve(respcopy)

                                count = 0

                                for val in vals:
                                    try:
                                        if '$ref' in six.iterkeys(
                                                end.resolve(val)):
                                            end.resolve(val).pop('$ref')
                                            end.resolve(val).update(dictcopy)
                                            replace_pointer = jsonpointer.\
                                                JsonPointer(end.path + jsonpath)

                                            data = replace_pointer.resolve(val)
                                            set_pointer(val, end.path, data)
                                            start.resolve(
                                                respcopy)[count].update(val)

                                            break
                                    except:
                                        count += 1
                            else:
                                itempath = jsonpointer.JsonPointer(itempath)
                                del itempath.parts[-1]

                                if '$ref' in six.iterkeys(
                                        itempath.resolve(respcopy)):
                                    itempath.resolve(respcopy).pop('$ref')
                                    itempath.resolve(respcopy).update(dictcopy)
                                    break

                if jsonpath:
                    if 'anyOf' in fullpath:
                        continue

                    if not jsonfile:
                        replacepath = jsonpointer.JsonPointer(jsonpath)
                        schemapath = schemapath.replace('/$ref', '')
                        if re.search('\[\d]', schemapath):
                            schemapath = schemapath.translate(None, '[]')
                        schemapath = jsonpointer.JsonPointer(schemapath)
                        data = replacepath.resolve(respcopy)

                        if '$ref' in schemapath.resolve(respcopy):
                            schemapath.resolve(respcopy).pop('$ref')
                            schemapath.resolve(respcopy).update(data)

                    else:
                        if not listmatch:
                            schemapath = schemapath.replace('/$ref', '')
                            replacepath = schemapath + jsonpath
                            replace_pointer = jsonpointer.JsonPointer(
                                replacepath)
                            data = replace_pointer.resolve(respcopy)
                            set_pointer(respcopy, schemapath, data)

            resp.loaddict(respcopy)
        else:
            resp.loaddict(respcopy)

    def _parse_schema_gen(self, resp):
        """Redfish general function to get and replace schema $ref with data

        :param resp: response data containing ref items.
        :type resp: str

        """
        #pylint: disable=maybe-no-member
        getval = lambda inmat: getval(inmat.left) + '/' + str(inmat.right) \
                            if hasattr(inmat, 'left') else str(inmat)
        respcopy = resp.dict
        jsonpath_expr = jsonpath_rw.parse('$.."anyOf"')
        while True:
            matches = jsonpath_expr.find(respcopy)
            if not matches:
                break
            match = matches[0]
            newval = None
            schlist = match.value
            schlist = [ele for ele in list(schlist) if ele != {"type": "null"}]
            norefsch = [ele for ele in list(schlist) if isinstance(ele, dict) and \
                                                                                len(ele.keys()) > 1]
            if norefsch:
                newval = norefsch[0]
            else:
                newsc = [
                    ele for ele in list(schlist)
                    if not ele["$ref"].split('#')[0]
                ]
                newval = newsc[0] if newsc else None
                if not newval:
                    schlist = [ele["$ref"] for ele in list(schlist) if "$ref" in ele.keys() and \
                       (ele["$ref"].split('#')[0].endswith('.json') and 'odata' not in \
                       ele["$ref"].split('#')[0])]
                    maxsch = max(schlist)
                    newval = {"$ref": maxsch}

            itempath = '/' + getval(match.full_path)
            if re.search('\[\d+]', itempath):
                itempath = itempath.translate(None, '[]')
            itempath = jsonpointer.JsonPointer(itempath)
            del itempath.parts[-1]
            if 'anyOf' in six.iterkeys(itempath.resolve(respcopy)):
                itempath.resolve(respcopy).pop('anyOf')
                itempath.resolve(respcopy).update(newval)

        jsonpath_expr = jsonpath_rw.parse('$.."$ref"')
        matches = jsonpath_expr.find(respcopy)
        if matches:
            for _, match in enumerate(matches):
                jsonfile = match.value.split('#')[0]
                jsonfile = '' if jsonfile.lower() == resp.request.path.lower(
                ) else jsonfile
                jsonpath = match.value.split('#')[1]

                schemapath = '/' + getval(match.full_path)
                if jsonfile:
                    itempath = schemapath
                    if '/' not in jsonfile:
                        inds = -2 if resp.request.path[-1] == '/' else -1
                        jsonfile = '/'.join(resp.request.path.split('/')[:inds]) \
                                    + '/' + jsonfile + '/'
                    if jsonfile not in self.paths:
                        self.load(jsonfile, crawl=False, includelogs=False, \
                                                init=False, loadtype='ref')
                    item = self.paths[
                        jsonfile] if jsonfile in self.paths else None

                    if not item:
                        if not 'anyOf' in schemapath:
                            raise SchemaValidationError()
                        continue
                    if re.search('\[\d+]', itempath):
                        itempath = itempath.translate(None, '[]')
                    itempath = jsonpointer.JsonPointer(itempath)
                    del itempath.parts[-1]
                    if '$ref' in six.iterkeys(itempath.resolve(respcopy)):
                        itempath.resolve(respcopy).pop('$ref')
                        itempath.resolve(respcopy).update(item.dict)

                if jsonpath:
                    schemapath = schemapath.replace('/$ref', '')
                    if re.search('\[\d+]', schemapath):
                        schemapath = schemapath.translate(None, '[]')
                    if not jsonfile:
                        replacepath = jsonpointer.JsonPointer(jsonpath)
                        schemapath = jsonpointer.JsonPointer(schemapath)
                        data = replacepath.resolve(respcopy)
                        if '$ref' in schemapath.resolve(respcopy):
                            schemapath.resolve(respcopy).pop('$ref')
                            schemapath.resolve(respcopy).update(data)
                    else:
                        replacepath = schemapath + jsonpath
                        replace_pointer = jsonpointer.\
                                                    JsonPointer(replacepath)
                        data = replace_pointer.resolve(respcopy)
                        set_pointer(respcopy, schemapath, data)

            resp.loaddict(respcopy)
        else:
            resp.loaddict(respcopy)

    def load_from_dict(self, src):
        """Load data to monolith from a dict. This is the reverse of :func:`to_dict`.

        :param src: Data received from a rest operation, as produced by :func:`to_dict`.
        :type src: dict

        """
        self._type = src['Type']
        self._name = src['Name']
        self.typesadded = defaultdict(
            set, {ki: set(val)
                  for ki, val in src['typepath'].items()})
        self.ctree = defaultdict(
            set, {ki: set(val)
                  for ki, val in src['ctree'].items()})
        self.colltypes = defaultdict(
            set, {ki: set(val)
                  for ki, val in src['colls'].items()})
        for _, resp in list(src['resps'].items()):
            member = RisMonolithMemberv100(None, self.is_redfish)
            member.load_from_dict(resp)
            self.update_member(member=member, init=False)

    def to_dict(self):
        """Convert data to a dict from monolith. This is the reverse of :func:`load_from_dict`."""
        result = OrderedDict()
        result['Type'] = self.type
        result['Name'] = self.name
        result["typepath"] = self.typesadded
        result['ctree'] = self.ctree
        result['colls'] = self.colltypes
        result["resps"] = {x: v.to_dict() for x, v in list(self.paths.items())}
        return result

    def markmodified(self, opath, path=None, modpaths=None):
        """Mark the paths to be modifed which are connected to current path. When calling this
        function you only need to include `opath`.

        :param opath: original path which has been modified
        :type opath: str
        """
        modpaths = set() if modpaths is None else modpaths
        path = path if path else opath
        if not path:
            return
        modpaths.update(self.ctree[path] if path in self.ctree else set())
        self.paths[path].modified = True
        for npath in [unmodpath for unmodpath in modpaths if unmodpath \
                                        in self.paths and not self.paths[unmodpath].modified]:
            self.markmodified(opath, path=npath, modpaths=modpaths)
        return modpaths

    def checkmodified(self, opath, path=None, modpaths=None):
        """Check if the path or its children are modified. When calling this
        function you only need to include `opath`.

        :param opath: original path which has been modified
        :type opath: str
        """
        #return [paths for paths in self.ctree[path] if self.paths[paths].modified]
        modpaths = set() if modpaths is None else modpaths
        path = path if path else opath
        newpaths = set()
        if not path:
            return
        if path in self.paths and self.paths[path].modified:
            newpaths = set([conn for conn in self.ctree[path] if conn in \
                 self.paths and self.paths[path].modified]) - modpaths
            modpaths.update(newpaths | set([path]))
        for npath in [unmodpath for unmodpath in newpaths]:
            self.checkmodified(opath, path=npath, modpaths=modpaths)
        return modpaths

    def removepath(self, path):
        """Remove a given path from the cache

        :param path: path to remove from the cache
        :type path: str
        """
        if path in self._visited_urls:
            self._visited_urls.remove(path)
        if not path in self.paths:
            return
        if path in self.typesadded[self.paths[path].maj_type]:
            self.typesadded[self.paths[path].maj_type].remove(path)
        if not self.typesadded[self.paths[path].maj_type]:
            del self.typesadded[self.paths[path].maj_type]
        del self.paths[path]
        if path in self.ctree:
            del self.ctree[path]
        _ = [
            self.ctree[paths].remove(path) for paths in self.ctree
            if path in self.ctree[paths]
        ]

    def _populatecollections(self):
        """Populate the collections type and types depending on resourcedirectory"""
        if not self._resourcedir in self.paths:
            return
        self.colltypes = defaultdict(set)
        alltypes = []
        colls = []
        for item in self.paths[self._resourcedir].dict["Instances"]:
            #Fix for incorrect RDir instances.
            if not self.typepath.defs.typestring in item or item[self.typepath.defs.hrefstring] \
                                                                                    in self.paths:
                continue
            typename = ".".join(item[self.typepath.defs.typestring].split(".", 2)[:2])\
                                                                                    .split('#')[-1]
            _ = [
                alltypes.append(typename)
                if not 'Collection' in typename else None
            ]
            _ = [colls.append(typename) if 'Collection' in typename else None]
            member = RisMonolithMemberv100(None, self.is_redfish)
            member.popdefs(typename, item[self.typepath.defs.hrefstring],
                           item[self.etagstr])
            self.update_member(member=member, init=False)
        for coll in colls:
            collname = coll.split('Collection')[0].split('#')[-1]
            typename = next(
                (name for name in alltypes if name.startswith(collname)), None)
            colltype = ".".join(coll.split(".", 2)[:2]).split('#')[-1]
            self.colltypes[typename].add(colltype)

    def capture(self, redmono=False):
        """Crawls the server specified by the client and returns the entire monolith.

        :param redmono: Flag to return only the headers and responses instead of the entire monolith
                        member data.
        :type redmono: bool
        :rtype: dict
        """
        self.load(includelogs=True,
                  crawl=True,
                  loadcomplete=True,
                  path_refresh=True,
                  init=True)
        return self.to_dict() if not redmono else {x:{"Headers":v.resp.getheaders(), \
                "Response":v.resp.dict} for x, v in list(self.paths.items()) if v}

    def killthreads(self):
        """Function to kill threads on logout"""
        threads = []
        for thread in threading.enumerate():
            if isinstance(thread, LoadWorker):
                self.get_queue.put(('KILL', 'KILL', 'KILL', 'KILL',\
                                'KILL', 'KILL', 'KILL', 'KILL', 'KILL', 'KILL'))
                threads.append(thread)

        for thread in threads:
            thread.join()

    def _update_progress(self):
        """Simple function to increment the dot progress"""
        if self.progress % 6 == 0:
            sys.stdout.write('.')
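
A brief usage sketch for the monolith above. It assumes ``client`` is an already logged-in :class:`redfish.rest.v1.RestClient` and ``typepath`` a matching ``Typesandpathdefines`` (their construction is not shown in this listing), and the type string is an illustrative example rather than a guaranteed value.

monolith = RisMonolith(client, typepath)
monolith.load(init=True, crawl=True)                  # crawl the service and cache responses
print(monolith.types)                                 # major types discovered during the crawl
for member in monolith.itertype('ComputerSystem.'):   # hypothetical type name
    print(member.path, member.maj_type)
monolith.killthreads()                                # stop the LoadWorker threads, e.g. on logout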
Code example #38
        if choice != '':
            job_queue.put({
                'q': entity_id,
                'action': 'set_description',
                'lang': 'nb',
                'value': choice,
                'summary': '#no_to_nb cleanup drive'
            })

            if labels['no'].get('description') is not None:
                job_queue.put({
                    'q': entity_id,
                    'action': 'remove_description',
                    'lang': 'no',
                    'summary': '#no_to_nb cleanup drive'
                })

    completed += 1
    t1 = time.time() - t0
    if not bg_thread.is_alive():
        print('Thread exited. Starting a new')
        bg_thread = start_thread()

    while job_queue.qsize() > 100:
        print('Job queue length: %d, sleeping a while' % job_queue.qsize())
        time.sleep(10)
    print('Status: Fixed %s items. %s items left to go, time: %.2f sec/item, job queue length: %d' % (completed, len(rows), t1 / completed, job_queue.qsize()))

print('*** Main thread waiting')
job_queue.join()
print('*** Done')
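
The snippet above shows only the producer side; a minimal sketch of the consumer it presupposes might look like the following (``process_job`` is a hypothetical stand-in for whatever performs one queued edit, and ``start_thread`` is the helper the snippet already calls).

from threading import Thread

def worker():
    while True:
        job = job_queue.get()
        try:
            process_job(job)          # e.g. apply set_description / remove_description
        finally:
            job_queue.task_done()     # lets job_queue.join() in the main thread return

def start_thread():
    t = Thread(target=worker)
    t.daemon = True                   # don't keep the process alive on exit
    t.start()
    return t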
Code example #39
File: server.py Project: psav/riggerlib
class Rigger(object):
    """ A Rigger event framework instance.

    The Rigger object holds all configuration and instances of plugins. By default Rigger accepts
    a configuration file name to parse, though it is perfectly acceptable to pass the configuration
    into the ``self.config`` attribute.

    Args:
        config_file: A configuration file holding all of Riggers base and plugin configuration.
    """
    def __init__(self, config_file):
        self.gdl = threading.Lock()
        self.pre_callbacks = defaultdict(dict)
        self.post_callbacks = defaultdict(dict)
        self.plugins = {}
        self.config_file = config_file
        self.squash_exceptions = False
        self.initialized = False
        self._task_list = {}
        self._queue_lock = threading.Lock()
        self._global_queue = Queue()
        self._background_queue = Queue()
        self._server_shutdown = False
        self._zmq_event_handler_shutdown = False
        self._global_queue_shutdown = False
        self._background_queue_shutdown = False

        globt = threading.Thread(target=self.process_queue, name="global_queue_processor")
        globt.start()
        bgt = threading.Thread(
            target=self.process_background_queue, name="background_queue_processor")
        bgt.start()

    def process_queue(self):
        """
        The ``process_queue`` thread manages taking events on and off of the global queue.
        Both TCP and in-object fire_hooks place events onto the global_queue and these are both
        handled by the same handler called ``process_hook``. If there is an exception during
        processing, the exception is printed and execution continues.
        """
        while not self._global_queue_shutdown:
            while not self._global_queue.empty():
                with self._queue_lock:
                    tid = self._global_queue.get()
                    obj = self._task_list[tid].json_dict
                    self._task_list[tid].status = Task.RUNNING
                try:
                    loc, glo = self.process_hook(obj['hook_name'], **obj['data'])
                    combined_dict = {}
                    combined_dict.update(glo)
                    combined_dict.update(loc)
                    self._task_list[tid].output = combined_dict
                except Exception as e:
                    self.log_message(e)
                with self._queue_lock:
                    self._global_queue.task_done()
                    self._task_list[tid].status = Task.FINISHED
                if not self._task_list[tid].json_dict.get('grab_result', None):
                    del self._task_list[tid]
            time.sleep(0.1)

    def process_background_queue(self):
        """
        The ``process_background_queue`` thread manages the hooks which have been backgrounded. The
        tasks it completes are not required by the rest of the run and as such can be forgotten
        about. An example would be a task that sends an email or tars up files; it has all the
        information it needs and the main process doesn't need to wait for it to complete.
        """
        while not self._background_queue_shutdown:
            while not self._background_queue.empty():
                obj = self._background_queue.get()
                try:
                    local, globals_updates = self.process_callbacks(obj['cb'], obj['kwargs'])
                    with self.gdl:
                        self.global_data = recursive_update(self.global_data, globals_updates)
                except Exception as e:
                    self.log_message(e)
                self._background_queue.task_done()
            time.sleep(0.1)

    def zmq_event_handler(self, zmq_socket_address):
        """
        The ``zmq_event_handler`` thread receives (and responds to) updates from the
        zmq socket, which is normally embedded in the web server running alongside this
        riggerlib instance, in its own process.

        """
        ctx = zmq.Context()
        zmq_socket = ctx.socket(zmq.REP)
        zmq_socket.set(zmq.RCVTIMEO, 300)
        zmq_socket.bind(zmq_socket_address)

        def zmq_reply(message, **extra):
            payload = {'message': message}
            payload.update(extra)
            zmq_socket.send_json(payload)
        bad_request = partial(zmq_reply, 'BAD REQUEST')

        while not self._zmq_event_handler_shutdown:
            try:
                json_dict = zmq_socket.recv_json()
            except zmq.Again:
                continue

            try:
                event_name = json_dict['event_name']
            except KeyError:
                bad_request()
                continue

            if event_name == 'fire_hook':
                tid = self._fire_internal_hook(json_dict)
                if tid:
                    zmq_reply('OK', tid=tid)
                else:
                    bad_request()
            elif event_name == 'task_check':
                try:
                    tid = json_dict['tid']
                    extra = {
                        "tid": tid,
                        "status": self._task_list[tid].status,
                    }
                    if json_dict['grab_result']:
                        extra["output"] = self._task_list[tid].output
                    zmq_reply('OK', **extra)
                except KeyError:
                    zmq_reply('NOT FOUND')
            elif event_name == 'task_delete':
                try:
                    tid = json_dict['tid']
                    del self._task_list[tid]
                    zmq_reply('OK', tid=tid)
                except KeyError:
                    zmq_reply('OK', tid=tid)
            elif event_name == 'shutdown':
                zmq_reply('OK')
                # We gotta initiate server stop from here and stop this thread
                self._server_shutdown = True
                break
            elif event_name == 'ping':
                zmq_reply('PONG')
            else:
                bad_request()

        zmq_socket.close()
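
    # A client-side sketch of talking to the handler above (illustrative only: the
    # address matches the defaults read in start_server below, and the hook name and
    # payload are made up):
    #
    #   import zmq
    #   sock = zmq.Context().socket(zmq.REQ)
    #   sock.connect('tcp://127.0.0.1:21212')
    #   sock.send_json({'event_name': 'fire_hook',
    #                   'hook_name': 'start_test', 'data': {'foo': 'bar'}})
    #   print(sock.recv_json())       # e.g. {'message': 'OK', 'tid': '...'}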

    def read_config(self, config_file):
        """
        Reads in the config file and parses the yaml data.

        Args:
            config_file: A configuration file holding all of Riggers base and plugin configuration.

        Raises:
            IOError: If the file can not be read.
            Exception: If there is any error parsing the configuration file.
        """
        try:
            with open(config_file, "r") as stream:
                data = yaml.safe_load(stream)
        except IOError:
            print("!!! Configuration file could not be loaded...exiting")
            sys.exit(127)
        except Exception as e:
            print(e)
            print("!!! Error parsing Configuration file")
            sys.exit(127)
        self.config = data

    def parse_config(self):
        """
        Takes the configuration data from ``self.config`` and sets up the plugin instances.
        """
        self.read_config(self.config_file)
        self.setup_plugin_instances()
        self.start_server()

    def setup_plugin_instances(self):
        """
        Sets up instances into a dict called ``self.instances`` and instantiates each
        instance of the plugin. It also sets the ``self._threaded`` option to determine
        if plugins will be processed synchronously or asynchronously.
        """
        self.instances = {}
        self._threaded = self.config.get("threaded", False)
        plugins = self.config.get("plugins", {})
        for ident, config in plugins.items():
            self.setup_instance(ident, config)

    def setup_instance(self, ident, config):
        """
        Sets up a single instance into the ``self.instances`` dict. If the named plugin has
        not been registered, a warning is logged and the instance is disabled.

        Args:
            ident: A plugin instance identifier.
            config: Configuration dict from the yaml.
        """
        plugin_name = config.get('plugin', {})
        if plugin_name in self.plugins:
            obj = self.plugins[plugin_name]
            if obj:
                obj_instance = obj(ident, config, self)
                self.instances[ident] = RiggerPluginInstance(ident, obj_instance, config)
        else:
            msg = "Plugin [{}] was not found, "\
                  "disabling instance [{}]".format(plugin_name, ident)
            self.log_message(msg)

    def start_server(self):
        """
        Starts the ZMQ server if the ``server_enabled`` is True in the config.
        """
        self._server_hostname = self.config.get('server_address', '127.0.0.1')
        self._server_port = self.config.get('server_port', 21212)
        self._server_enable = self.config.get('server_enabled', False)
        if self._server_enable:
            zmq_socket_address = 'tcp://{}:{}'.format(self._server_hostname, self._server_port)
            # set up receiver thread for zmq event handling
            zeh = threading.Thread(
                target=self.zmq_event_handler, args=(zmq_socket_address,), name="zmq_event_handler")
            zeh.start()
            exect = threading.Thread(target=self.await_shutdown, name="executioner")
            exect.start()
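
    # Shape of the configuration the methods above read (illustrative values; the
    # instance ident and plugin name are made up):
    #
    #   threaded: True
    #   server_enabled: True
    #   server_address: 127.0.0.1
    #   server_port: 21212
    #   plugins:
    #     my_instance:
    #       plugin: some_registered_plugin
    #       enabled: True
    #       background: False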

    def await_shutdown(self):
        while not self._server_shutdown:
            time.sleep(0.3)
        self.stop_server()

    def stop_server(self):
        """
        Responsible for the following:
            - stopping the zmq event handler (unless already stopped through 'terminate')
            - stopping the global queue
            - stopping the background queue
        """
        self.log_message("Shutdown initiated : {}".format(self._server_hostname))
        # The order here is important
        self._zmq_event_handler_shutdown = True
        self._global_queue.join()
        self._global_queue_shutdown = True
        self._background_queue.join()
        self._background_queue_shutdown = True
        raise SystemExit

    def fire_hook(self, hook_name, **kwargs):
        """
        Parses the hook information into a dict for passing to process_hook. This is used
        to enable both the TCP and in-object fire_hook methods to use the same process_hook
        method call.

        Args:
            hook_name: The name of the hook to fire.
            kwargs: The kwargs to pass to the hooks.

        """
        json_dict = {'hook_name': hook_name, 'data': kwargs}
        self._fire_internal_hook(json_dict)

    def _fire_internal_hook(self, json_dict):
        task = Task(json_dict)
        tid = task.tid.hexdigest()
        self._task_list[tid] = task
        if self._global_queue:
            with self._queue_lock:
                self._global_queue.put(tid)
            return tid
        else:
            return None

    def process_hook(self, hook_name, **kwargs):
        """
        Takes a hook_name and a selection of kwargs and fires off the appropriate callbacks.

        This function is the guts of Rigger and is responsible for running the callback and
        hook functions. It first loads some blank dicts to collect the updates for the local
        and global namespaces. After this, it loads the pre_callback functions along with
        the kwargs into the callback collector processor.

        The return values are then classified into local and global dicts and updates proceed.
        After this, the plugin hooks themselves are then run using the same methodology. Their
        return values are merged with the existing dicts and then the same process happens
        for the post_callbacks.

        Note: If the instance of the plugin has been marked as a background instance, any hooks
              which are called in that instance will be backgrounded. The hook will also not
              be able to return any data to the post-hook callback, although updates to globals
              will be processed as and when the backgrounded task is completed.

        Args:
            hook_name: The name of the hook to fire.
            kwargs: The kwargs to pass to the hooks.
        """
        if not self.initialized:
            return
        kwargs_updates = {}
        globals_updates = {}
        kwargs.update({'config': self.config})

        # First fire off any pre-hook callbacks
        if self.pre_callbacks.get(hook_name):
            # print "Running pre hook callback for {}".format(hook_name)
            kwargs_updates, globals_updates = self.process_callbacks(
                self.pre_callbacks[hook_name].values(), kwargs)

            # Now we can update the kwargs passed to the real hook with the updates
            with self.gdl:
                self.global_data = recursive_update(self.global_data, globals_updates)
            kwargs = recursive_update(kwargs, kwargs_updates)

        # Now fire off each plugin hook
        event_hooks = []
        for instance_name, instance in self.instances.items():
            callbacks = instance.obj.callbacks
            enabled = instance.data.get('enabled', None)
            if callbacks.get(hook_name) and enabled:
                cb = callbacks[hook_name]
                if instance.data.get('background', False):
                    self._background_queue.put({'cb': [cb], 'kwargs': kwargs})
                elif cb['bg']:
                    self._background_queue.put({'cb': [cb], 'kwargs': kwargs})
                else:
                    event_hooks.append(cb)
        kwargs_updates, globals_updates = self.process_callbacks(event_hooks, kwargs)

        # One more update for the post_hook callback
        with self.gdl:
            self.global_data = recursive_update(self.global_data, globals_updates)
        kwargs = recursive_update(kwargs, kwargs_updates)

        # Finally any post-hook callbacks
        if self.post_callbacks.get(hook_name):
            # print "Running post hook callback for {}".format(hook_name)
            kwargs_updates, globals_updates = self.process_callbacks(
                self.post_callbacks[hook_name].values(), kwargs)
            with self.gdl:
                self.global_data = recursive_update(self.global_data, globals_updates)
            kwargs = recursive_update(kwargs, kwargs_updates)
        return kwargs, self.global_data

    def process_callbacks(self, callback_collection, kwargs):
        """
        Processes a collection of callbacks or hooks for a particular event, namely pre, hook or
        post.

        The functions are passed in as an array to ``callback_collection`` and process callbacks
        first iterates each function and ensures that each one has the correct arguments available
        to it. If not, an Exception is raised. Then, depending on whether Threading is enabled or
        not, the functions are either run sequentially, or loaded into a ThreadPool and executed
        asynchronously.

        The returned local and global updates are either collected and processed sequentially, as
        in the case of the non-threaded behaviour, or collected at the end of the
        callback_collection processing and handled there.

        Note:
            It is impossible to predict the order of the functions being run. If the order is
            important, it is advised to create a second event hook that will be fired before the
            other. Rigger has no concept of hook or callback order and is unlikely to ever have one.

        Args:
            callback_collection: A list of functions to call.
            kwargs: A set of kwargs to pass to the functions.

        Returns: A tuple of local and global namespace updates.
        """
        loc_collect = {}
        glo_collect = {}
        if self._threaded:
            results_list = []
            pool = ThreadPool(10)
        for cb in callback_collection:
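            # A parameter counts as required when its default is a class -- which is true
            # for inspect.Parameter.empty -- and must then be satisfied from either
            # global_data or the supplied kwargs.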
            required_args = [sig for sig in cb['args'] if isinstance(cb['args'][sig].default, type)]
            missing = list(set(required_args).difference(set(self.global_data.keys()))
                           .difference(set(kwargs.keys())))
            if not missing:
                new_kwargs = self.build_kwargs(cb['args'], kwargs)
                if self._threaded:
                    results_list.append(pool.apply_async(cb['func'], [], new_kwargs))
                else:
                    obtain_result = self.handle_results(cb['func'], [], new_kwargs)
                    loc_collect, glo_collect = self.handle_collects(
                        obtain_result, loc_collect, glo_collect)
            else:
                raise Exception('Function {} is missing kwargs {}'
                                .format(cb['func'].__name__, missing))

        if self._threaded:
            pool.close()
            pool.join()
            for result in results_list:
                obtain_result = self.handle_results(result.get, [], {})
                loc_collect, glo_collect = self.handle_collects(
                    obtain_result, loc_collect, glo_collect)
        return loc_collect, glo_collect

    def handle_results(self, call, args, kwargs):
        """
        Runs the given call and, depending on configuration, either squashes and logs any
        exception or re-raises it, returning the obtained result.

        Args:
            call: The function call.
            args: The positional arguments.
            kwargs: The keyword arguments.

        Returns: The obtained result of the callback or hook.
        """
        try:
            obtain_result = call(*args, **kwargs)
        except:
            if self.squash_exceptions:
                obtain_result = None
                self.handle_failure(sys.exc_info())
            else:
                raise

        return obtain_result

    def handle_collects(self, result, loc_collect, glo_collect):
        """
        Handles extracting the information from the hook/callback result.

        If the hook/callback returns None, the dicts are returned unaltered; otherwise they
        are updated with the local and global namespace updates.

        Args:
            result: The result to process.
            loc_collect: The local namespace updates collection.
            glo_collect: The global namespace updates collection.
        Returns: A tuple containing the local and global updates.
        """
        if result:
            if result[0]:
                loc_collect = recursive_update(loc_collect, result[0])
            if result[1]:
                glo_collect = recursive_update(glo_collect, result[1])
        return loc_collect, glo_collect

    def build_kwargs(self, args, kwargs):
        """
        Builds a new kwargs from a list of allowed args.

        Functions only receive a single set of kwargs, and so the global and local namespaces
        have to be collapsed. In this way, the local overrides the global namespace, hence if
        a key exists in both local and global, the local value will be passed to the function
        under the key name and the global value will be forgotten.

        The args parameter ensures that only the expected arguments are supplied.

        Args:
            args: A list of allowed argument names
            kwargs: A dict of kwargs from the local namespace.
        Returns: A consolidated global/local namespace with local overrides.
        """
        returned_args = {}
        returned_args.update({
            name: self.global_data[name] for name in args
            if name in self.global_data})
        returned_args.update({
            name: kwargs[name] for name in args
            if name in kwargs})
        return returned_args

    def register_hook_callback(self, hook_name=None, ctype="pre", callback=None, name=None):
        """
        Registers pre and post callbacks.

        Takes a callback function and assigns it to the hook_name with an optional identifier.
        The optional identifier makes it possible to hot bind functions into hooks and to
        remove them at a later date with ``unregister_hook_callback``.

        Args:
            hook_name: The name of the event hook to respond to.
            ctype: The call back type, either ``pre`` or ``post``.
            callback: The callback function.
            name: An optional name for the callback instance binding.
        """
        if hook_name and callback:
            callback_instance = self.create_callback(callback)
            if not name:
                name = hashlib.sha1(
                    (str(time.time()) + hook_name +
                     str(callback_instance['args'])).encode('utf-8')).hexdigest()
            if ctype == "pre":
                self.pre_callbacks[hook_name][name] = callback_instance
            elif ctype == "post":
                self.post_callbacks[hook_name][name] = callback_instance

    def unregister_hook_callback(self, hook_name, ctype, name):
        """
        Unregisters a pre or post callback.

        If the binding was registered with a known name, this function allows that binding to be removed.

        Args:
            hook_name: The event hook name.
            ctype: The callback type, either ``pre`` or ``post``.
            name: The name of the callback instance binding to remove.
        """
        if ctype == "pre":
            del self.pre_callbacks[hook_name][name]
        elif ctype == "post":
            del self.post_callbacks[hook_name][name]

    def register_plugin(self, cls, plugin_name=None):
        """ Registers a plugin class to a name.

        Multiple instances of the same plugin can be used in Rigger; ``self.plugins``
        stores un-initialized class definitions to be used by ``setup_plugin_instances``.

        Args:
            cls: The class.
            plugin_name: The name of the plugin.
        """
        if plugin_name in self.plugins:
            print("Plugin name already taken [{}]".format(plugin_name))
        elif plugin_name is None:
            print("Plugin name cannot be None")
        else:
            # print "Registering plugin {}".format(plugin_name)
            self.plugins[plugin_name] = cls

    def get_instance_obj(self, name):
        """
        Gets the instance object for a given ident name.

        Args:
            name: The ident name of the instance.
        Returns: The object of the instance.
        """
        if name in self.instances:
            return self.instances[name].obj
        else:
            return None

    def get_instance_data(self, name):
        """
        Gets the instance data (config) for a given ident name.

        Args:
            name: The ident name of the instance.
        Returns: The data (config) of the instance.
        """
        if name in self.instances:
            return self.instances[name].data
        else:
            return None

    def configure_plugin(self, name, *args, **kwargs):
        """
        Attempts to configure an instance, passing it the args and kwargs.

        Args:
            name: The ident name of the instance.
            args: The positional args.
            kwargs: The keyword arguments.
        """
        obj = self.get_instance_obj(name)
        if obj:
            obj.configure(*args, **kwargs)

    @staticmethod
    def create_callback(callback, bg=False):
        """
        Simple function to inspect a function and return it along with its param names
        wrapped up in a nice dict. This forms a callback object.

        Args:
            callback: The callback function.
            bg: Whether the callback should be run in the background.
        Returns: A dict of function and param names.
        """
        params = signature(callback).parameters
        return {
            'func': callback,
            'args': params,
            'bg': bg
        }

    def handle_failure(self, exc):
        """
        Handles an exception. It is expected that this be overridden.
        """
        self.log_message(exc)

    def log_message(self, message):
        """
        "Logs" a message. It is expected that this be overidden.
        """
        print(message)
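
# A minimal usage sketch for the class above (added for illustration; not taken from
# the original project). It assumes a fully constructed, running instance named
# `rigger` -- the constructor, queue workers and callback registries are outside this
# excerpt -- and the hook name 'start_test' is purely hypothetical.
def announce_start(test_name):
    # Return (kwargs_updates, globals_updates), the shape handle_collects() expects.
    return {'announced': True}, {'last_test': test_name}

rigger.register_hook_callback(hook_name='start_test', ctype='pre',
                              callback=announce_start, name='announce')
rigger.fire_hook('start_test', test_name='smoke-01')   # queues a Task for processing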
コード例 #40
ファイル: execute.py プロジェクト: thakurmm/galaxy
def execute(trans,
            tool,
            mapping_params,
            history,
            rerun_remap_job_id=None,
            collection_info=None,
            workflow_invocation_uuid=None,
            invocation_step=None,
            max_num_jobs=None,
            job_callback=None,
            completed_jobs=None):
    """
    Execute a tool and return object containing summary (output data, number of
    failures, etc...).
    """
    if max_num_jobs:
        assert invocation_step is not None
    if rerun_remap_job_id:
        assert invocation_step is None

    all_jobs_timer = ExecutionTimer()
    if invocation_step is None:
        execution_tracker = ToolExecutionTracker(trans, tool, mapping_params,
                                                 collection_info)
    else:
        execution_tracker = WorkflowStepExecutionTracker(
            trans,
            tool,
            mapping_params,
            collection_info,
            invocation_step,
            job_callback=job_callback)
    app = trans.app
    execution_cache = ToolExecutionCache(trans)

    def execute_single_job(execution_slice, completed_job):
        job_timer = ExecutionTimer()
        params = execution_slice.param_combination
        if workflow_invocation_uuid:
            params['__workflow_invocation_uuid__'] = workflow_invocation_uuid
        elif '__workflow_invocation_uuid__' in params:
            # Only workflow invocation code gets to set this, ignore user supplied
            # values or rerun parameters.
            del params['__workflow_invocation_uuid__']

        job, result = tool.handle_single_execution(trans, rerun_remap_job_id,
                                                   execution_slice, history,
                                                   execution_cache,
                                                   completed_job)
        if job:
            message = EXECUTION_SUCCESS_MESSAGE % (tool.id, job.id, job_timer)
            log.debug(message)
            execution_tracker.record_success(execution_slice, job, result)
        else:
            execution_tracker.record_error(result)

    tool_action = tool.tool_action
    if hasattr(tool_action, "check_inputs_ready"):
        for params in execution_tracker.param_combinations:
            # This will throw an exception if the tool is not ready.
            tool_action.check_inputs_ready(tool, trans, params, history)

    execution_tracker.ensure_implicit_collections_populated(
        history, mapping_params.param_template)
    config = app.config
    burst_at = getattr(config, 'tool_submission_burst_at', 10)
    burst_threads = getattr(config, 'tool_submission_burst_threads', 1)

    job_count = len(execution_tracker.param_combinations)

    jobs_executed = 0
    has_remaining_jobs = False

    if (job_count < burst_at or burst_threads < 2):
        for i, execution_slice in enumerate(
                execution_tracker.new_execution_slices()):
            if max_num_jobs and jobs_executed >= max_num_jobs:
                has_remaining_jobs = True
                break
            else:
                execute_single_job(execution_slice, completed_jobs[i])
    else:
        # TODO: re-record success...
        q = Queue()

        def worker():
            while True:
                execution_slice, completed_job = q.get()
                execute_single_job(execution_slice, completed_job)
                q.task_done()

        for i in range(burst_threads):
            t = Thread(target=worker)
            t.daemon = True
            t.start()

        for i, execution_slice in enumerate(
                execution_tracker.new_execution_slices()):
            if max_num_jobs and jobs_executed >= max_num_jobs:
                has_remaining_jobs = True
                break
            else:
                q.put((execution_slice, completed_jobs[i]))
                jobs_executed += 1

        q.join()

    if has_remaining_jobs:
        raise PartialJobExecution(execution_tracker)
    else:
        execution_tracker.finalize_dataset_collections(trans)

    log.debug("Executed %d job(s) for tool %s request: %s" %
              (job_count, tool.id, all_jobs_timer))
    return execution_tracker
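
# The "burst" branch above is a standard bounded worker-pool pattern. Below is a
# self-contained sketch of the same idea with a toy work function; none of the names
# here are Galaxy APIs.
from queue import Queue
from threading import Thread

def run_burst(items, num_threads=4):
    q = Queue()
    results = []

    def worker():
        while True:
            item = q.get()
            try:
                results.append(item * 2)   # stand-in for execute_single_job()
            finally:
                q.task_done()

    for _ in range(num_threads):
        Thread(target=worker, daemon=True).start()
    for item in items:
        q.put(item)
    q.join()            # block until every queued item has been processed
    return results      # order reflects completion order, not submission order

print(sorted(run_burst(range(10))))   # -> [0, 2, 4, ..., 18]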
コード例 #41
class RisMonolithv100(Dictable):
    """Monolithic cache of RIS data"""
    def __init__(self, client):
        """Initialize RisMonolith

        :param client: client to utilize
        :type client: RmcClient object

        """
        self._client = client
        self.name = "Monolithic output of RIS Service"
        self.types = OrderedDict()
        self._visited_urls = list()
        self._current_location = '/'  # "root"
        self.queue = Queue()
        self._type = None
        self._name = None
        self.progress = 0
        self.reload = False
        self.is_redfish = client._rest_client.is_redfish

        if self.is_redfish:
            self._resourcedir = '/redfish/v1/ResourceDirectory/'
            self._typestring = '@odata.type'
            self._hrefstring = '@odata.id'
        else:
            self._resourcedir = '/rest/v1/ResourceDirectory'
            self._typestring = 'Type'
            self._hrefstring = 'href'

    def _get_type(self):
        """Return monolith version type"""
        return "Monolith.1.0.0"

    type = property(_get_type, None)

    def update_progress(self):
        """Simple function to increment the dot progress"""
        if self.progress % 6 == 0:
            sys.stdout.write('.')

    def get_visited_urls(self):
        """Return the visited URLS"""
        return self._visited_urls

    def set_visited_urls(self, visited_urls):
        """Set visited URLS to given list."""
        self._visited_urls = visited_urls

    def load(self, path=None, includelogs=False, skipinit=False, \
                        skipcrawl=False, loadtype='href', loadcomplete=False):
        """Walk entire RIS model and cache all responses in self.

        :param path: path to start load from.
        :type path: str.
        :param includelogs: flag to determine if logs should be downloaded also.
        :type includelogs: boolean.
        :param skipinit: flag to determine if first run of load.
        :type skipinit: boolean.
        :param skipcrawl: flag to determine if load should traverse found links.
        :type skipcrawl: boolean.
        :param loadtype: flag to determine if load is meant for only href items.
        :type loadtype: str.
        :param loadcomplete: flag to download the entire monolith
        :type loadcomplete: boolean

        """
        if not skipinit:
            if LOGGER.getEffectiveLevel() == 40:
                sys.stdout.write("Discovering data...")
            else:
                LOGGER.info("Discovering data...")
            self.name = self.name + ' at %s' % self._client.base_url

            if not self.types:
                self.types = OrderedDict()

        if threading.active_count() < 6:
            for _ in range(5):
                workhand = SuperDuperWorker(self.queue)
                workhand.setDaemon(True)
                workhand.start()

        selectivepath = path
        if not selectivepath:
            selectivepath = self._client._rest_client.default_prefix

        self._load(selectivepath, skipcrawl=skipcrawl, includelogs=includelogs,\
             skipinit=skipinit, loadtype=loadtype, loadcomplete=loadcomplete)
        self.queue.join()

        if not skipinit:
            if LOGGER.getEffectiveLevel() == 40:
                sys.stdout.write("Done\n")
            else:
                LOGGER.info("Done\n")

    def _load(self, path, skipcrawl=False, originaluri=None, includelogs=False,\
                        skipinit=False, loadtype='href', loadcomplete=False):
        """Helper function to main load function.

        :param path: path to start load from.
        :type path: str.
        :param skipcrawl: flag to determine if load should traverse found links.
        :type skipcrawl: boolean.
        :param originaluri: variable to assist in determining originating path.
        :type originaluri: str.
        :param includelogs: flag to determine if logs should be downloaded also.
        :type includelogs: boolean.
        :param skipinit: flag to determine if first run of load.
        :type skipinit: boolean.
        :param loadtype: flag to determine if load is meant for only href items.
        :type loadtype: str.
        :param loadcomplete: flag to download the entire monolith
        :type loadcomplete: boolean

        """
        if path.endswith("?page=1"):
            return
        elif not includelogs:
            if "/Logs/" in path:
                return

        #TODO: need to find a better way to support non ascii characters
        path = path.replace("|", "%7C")
        #remove fragments
        newpath = urlparse(path)
        newpath = list(newpath[:])
        newpath[-1] = ''
        path = urlunparse(tuple(newpath))

        LOGGER.debug('_loading %s', path)

        if not self.reload:
            if path.lower() in self._visited_urls:
                return

        resp = self._client.get(path)

        if resp.status != 200 and path.lower() == self._client.typepath.defs.\
                                                                    biospath:
            raise BiosUnregisteredError()
        elif resp.status != 200:
            path = path + '/'
            resp = self._client.get(path)

            if resp.status == 401:
                raise SessionExpiredRis("Invalid session. Please logout and "\
                                        "log back in or include credentials.")
            elif resp.status != 200:
                return

        if loadtype == "ref":
            self.parse_schema(resp)

        self.queue.put((resp, path, skipinit, self))

        if loadtype == 'href':
            #follow all the href attributes
            if self.is_redfish:
                jsonpath_expr = jsonpath_rw.parse("$..'@odata.id'")
            else:
                jsonpath_expr = jsonpath_rw.parse('$..href')
            matches = jsonpath_expr.find(resp.dict)

            if 'links' in resp.dict and 'NextPage' in resp.dict['links']:
                if originaluri:
                    next_link_uri = originaluri + '?page=' + \
                                    str(resp.dict['links']['NextPage']['page'])
                    href = '%s' % next_link_uri

                    self._load(href, originaluri=originaluri, \
                               includelogs=includelogs, skipcrawl=skipcrawl, \
                               skipinit=skipinit)
                else:
                    next_link_uri = path + '?page=' + \
                                    str(resp.dict['links']['NextPage']['page'])

                    href = '%s' % next_link_uri
                    self._load(href, originaluri=path, includelogs=includelogs,\
                                        skipcrawl=skipcrawl, skipinit=skipinit)

            (newversion, dirmatch) = self.check_for_directory(matches)
            if not newversion and not skipcrawl:
                for match in matches:
                    if path == "/rest/v1":
                        if str(match.full_path) == "links.Schemas.href" or \
                                str(match.full_path) == "links.Registries.href":
                            continue
                    else:
                        if str(match.full_path) == "*****@*****.**" or \
                                str(match.full_path) == "*****@*****.**":
                            continue

                    if match.value == path:
                        continue

                    href = '%s' % match.value
                    self._load(href, skipcrawl=skipcrawl, \
                           originaluri=originaluri, includelogs=includelogs, \
                           skipinit=skipinit)
            elif not skipcrawl:
                href = '%s' % dirmatch.value
                self._load(href, skipcrawl=skipcrawl, originaluri=originaluri, \
                                    includelogs=includelogs, skipinit=skipinit)
            if loadcomplete:
                for match in matches:
                    self._load(match.value, skipcrawl=skipcrawl, originaluri=\
                       originaluri, includelogs=includelogs, skipinit=skipinit)

    def parse_schema(self, resp):
        """Function to get and replace schema $ref with data

        :param resp: response data containing ref items.
        :type resp: str.

        """
        #pylint: disable=maybe-no-member
        jsonpath_expr = jsonpath_rw.parse('$.."$ref"')
        matches = jsonpath_expr.find(resp.dict)
        respcopy = resp.dict
        typeregex = r'([#,@].*?\.)'
        if matches:
            for match in matches:
                fullpath = str(match.full_path)
                jsonfile = match.value.split('#')[0]
                jsonpath = match.value.split('#')[1]
                listmatch = None
                found = None

                if 'redfish.dmtf.org' in jsonfile:
                    if 'odata' in jsonfile:
                        jsonpath = jsonpath.replace(jsonpath.split('/')[-1], \
                                            'odata' + jsonpath.split('/')[-1])
                    jsonfile = 'Resource.json'

                found = re.search(typeregex, fullpath)
                if found:
                    repitem = fullpath[found.regs[0][0]:found.regs[0][1]]
                    schemapath = '/' + fullpath.replace(repitem, '~').\
                                        replace('.', '/').replace('~', repitem)
                else:
                    schemapath = '/' + fullpath.replace('.', '/')

                if '.json' in jsonfile:
                    itempath = schemapath

                    if self.is_redfish:
                        if resp.request.path[-1] == '/':
                            newpath = '/'.join(resp.request.path.split('/')\
                                                [:-2]) + '/' + jsonfile + '/'
                        else:
                            newpath = '/'.join(resp.request.path.split('/')\
                                                [:-1]) + '/' + jsonfile + '/'
                    else:
                        newpath = '/'.join(resp.request.path.split('/')[:-1]) \
                                                                + '/' + jsonfile

                    if 'href.json' in newpath:
                        continue

                    if not newpath.lower() in self._visited_urls:
                        self.load(newpath, skipcrawl=True, includelogs=False, \
                                                skipinit=True, loadtype='ref')

                    instance = list()

                    if 'st' in self.types:
                        for stitem in self.types['st']['Instances']:
                            instance.append(stitem)
                    if 'ob' in self.types:
                        for obitem in self.types['ob']['Instances']:
                            instance.append(obitem)

                    for item in instance:
                        if jsonfile in item.resp._rest_request._path:
                            if 'anyOf' in fullpath:
                                break

                            dictcopy = item.resp.dict
                            listmatch = re.search('[[][0-9]+[]]', itempath)

                            if listmatch:
                                start = listmatch.regs[0][0]
                                end = listmatch.regs[0][1]

                                newitempath = [
                                    itempath[:start], itempath[end:]
                                ]
                                start = jsonpointer.JsonPointer(newitempath[0])
                                end = jsonpointer.JsonPointer(newitempath[1])

                                del start.parts[-1], end.parts[-1]
                                vals = start.resolve(respcopy)

                                count = 0

                                for val in vals:
                                    try:
                                        if '$ref' in six.iterkeys(
                                                end.resolve(val)):
                                            end.resolve(val).pop('$ref')
                                            end.resolve(val).update(dictcopy)
                                            replace_pointer = jsonpointer.\
                                                JsonPointer(end.path + jsonpath)

                                            data = replace_pointer.resolve(val)
                                            set_pointer(val, end.path, data)
                                            start.resolve(respcopy)[count].\
                                                                    update(val)

                                            break
                                    except:
                                        count += 1
                            else:
                                itempath = jsonpointer.JsonPointer(itempath)
                                del itempath.parts[-1]

                                if '$ref' in six.iterkeys(
                                        itempath.resolve(respcopy)):
                                    itempath.resolve(respcopy).pop('$ref')
                                    itempath.resolve(respcopy).update(dictcopy)

                if jsonpath:
                    if 'anyOf' in fullpath:
                        continue

                    if not jsonfile:
                        replacepath = jsonpointer.JsonPointer(jsonpath)
                        schemapath = schemapath.replace('/$ref', '')
                        if re.search(r'\[\d]', schemapath):
                            schemapath = schemapath.replace('[', '').replace(']', '')
                        schemapath = jsonpointer.JsonPointer(schemapath)
                        data = replacepath.resolve(respcopy)

                        if '$ref' in schemapath.resolve(respcopy):
                            schemapath.resolve(respcopy).pop('$ref')
                            schemapath.resolve(respcopy).update(data)

                    else:
                        if not listmatch:
                            schemapath = schemapath.replace('/$ref', '')
                            replacepath = schemapath + jsonpath
                            replace_pointer = jsonpointer.\
                                                        JsonPointer(replacepath)
                            data = replace_pointer.resolve(respcopy)
                            set_pointer(respcopy, schemapath, data)

            resp.json(respcopy)
        else:
            resp.json(respcopy)

    def check_for_directory(self, matches):
        """Function to allow checking for new directory

        :param matches: current found matches.
        :type matches: dict.

        """
        for match in matches:
            if match.value == self._resourcedir:
                return (True, match)

        return (False, None)

    def branch_worker(self, resp, path, skipinit):
        """Helper for load function, creates threaded worker

        :param resp: response received.
        :type resp: str.
        :param path: path correlating to the response.
        :type path: str.
        :param skipinit: flag to determine if progress bar should be updated.
        :type skipinit: boolean.

        """
        self._visited_urls.append(path.lower())

        member = RisMonolithMemberv100(resp, self.is_redfish)
        if not member.type:
            return

        self.update_member(member)

        if not skipinit:
            self.progress += 1
            if LOGGER.getEffectiveLevel() == 40:
                self.update_progress()

    def update_member(self, member):
        """Adds member to this monolith. If the member already exists the
        data is updated in place.

        :param member: Ris monolith member object made by branch worker.
        :type member: RisMonolithMemberv100.

        """
        if member.maj_type not in self.types:
            self.types[member.maj_type] = OrderedDict()
            self.types[member.maj_type]['Instances'] = list()

        found = False

        for indices in range(len(self.types[member.maj_type]['Instances'])):
            inst = self.types[member.maj_type]['Instances'][indices]

            if inst.resp.request.path == member.resp.request.path:
                self.types[member.maj_type]['Instances'][indices] = member
                self.types[member.maj_type]['Instances'][indices].patches.\
                                    extend([patch for patch in inst.patches])

                found = True
                break

        if not found:
            self.types[member.maj_type]['Instances'].append(member)

    def load_from_dict(self, src):
        """Load data to monolith from dict

        :param src: data received from a rest operation.
        :type src: str.

        """
        self._type = src['Type']
        self._name = src['Name']
        self.types = OrderedDict()

        for typ in src['Types']:
            for inst in typ['Instances']:
                member = RisMonolithMemberv100(None, self.is_redfish)
                member.load_from_dict(inst)
                self.update_member(member)

        return

    def to_dict(self):
        """Convert data to monolith from dict"""
        result = OrderedDict()
        result['Type'] = self.type
        result['Name'] = self.name
        types_list = list()

        for typ in list(self.types.keys()):
            type_entry = OrderedDict()
            type_entry['Type'] = typ
            type_entry['Instances'] = list()

            for inst in self.types[typ]['Instances']:
                type_entry['Instances'].append(inst.to_dict())

            types_list.append(type_entry)

        result['Types'] = types_list
        return result

    def reduce(self):
        """Reduce monolith data"""
        result = OrderedDict()
        result['Type'] = self.type
        result['Name'] = self.name
        types_list = list()

        for typ in list(self.types.keys()):
            type_entry = OrderedDict()
            type_entry['Type'] = typ

            for inst in self.types[typ]['Instances']:
                type_entry['Instances'] = inst.reduce()

            types_list.append(type_entry)

        result['Types'] = types_list
        return result

    def _jsonpath2jsonpointer(self, instr):
        """Convert json path to json pointer

        :param instr: input path to be converted to pointer.
        :type instr: str.

        """
        outstr = instr.replace('.[', '[')
        outstr = outstr.replace('[', '/')
        outstr = outstr.replace(']', '/')

        if outstr.endswith('/'):
            outstr = outstr[:-1]

        return outstr

    def _get_current_location(self):
        """Return current location"""
        return self._current_location

    def _set_current_location(self, newval):
        """Set current location"""
        self._current_location = newval

    location = property(_get_current_location, _set_current_location)

    def list(self, lspath=None):
        """Function for list command

        :param lspath: path list.
        :type lspath: list.

        """
        results = list()
        path_parts = ['Types']  # Types is always assumed

        if isinstance(lspath, list) and len(lspath) > 0:
            lspath = lspath[0]
            path_parts.extend(lspath.split('/'))
        elif not lspath:
            lspath = '/'
        else:
            path_parts.extend(lspath.split('/'))

        currpos = self.to_dict()
        for path_part in path_parts:
            if not path_part:
                continue

            if isinstance(currpos, RisMonolithMemberv100):
                break
            elif isinstance(currpos, dict) and path_part in currpos:
                currpos = currpos[path_part]
            elif isinstance(currpos, list):
                for positem in currpos:
                    if 'Type' in positem and path_part == positem['Type']:
                        currpos = positem
                        break

        results.append(currpos)

        return results

    def killthreads(self):
        """Function to kill threads on logout"""
        threads = []
        for thread in threading.enumerate():
            if isinstance(thread, SuperDuperWorker):
                self.queue.put(('KILL', 'KILL', 'KILL', 'KILL'))
                threads.append(thread)

        for thread in threads:
            thread.join()
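
# killthreads() above shuts its workers down by queueing one ('KILL', ...) sentinel per
# thread. SuperDuperWorker itself is not part of this excerpt, so the following is a
# self-contained sketch of the same sentinel-shutdown pattern with a toy worker.
from queue import Queue
from threading import Thread

SENTINEL = ('KILL', 'KILL', 'KILL', 'KILL')

class ToyWorker(Thread):
    """Stand-in for SuperDuperWorker: drain the queue until a sentinel arrives."""
    def __init__(self, queue):
        super().__init__(daemon=True)
        self.queue = queue

    def run(self):
        while True:
            item = self.queue.get()
            try:
                if item == SENTINEL:
                    return
                # a real worker would process (resp, path, skipinit, monolith) here
            finally:
                self.queue.task_done()

q = Queue()
workers = [ToyWorker(q) for _ in range(5)]
for w in workers:
    w.start()
q.put(('resp', '/redfish/v1/', False, None))   # toy work item
q.join()                                       # wait for queued work to finish
for _ in workers:
    q.put(SENTINEL)                            # one sentinel per worker
for w in workers:
    w.join()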
コード例 #42
    def query_dqsegdb(cls, flags, *args, **kwargs):
        """Query the advanced LIGO DQSegDB for a list of flags.

        Parameters
        ----------
        flags : `iterable`
            A list of flag names for which to query.

        *args
            Either two `float`-like numbers indicating the
            GPS [start, stop) interval, or a `SegmentList`
            defining a number of summary segments.

        on_error : `str`
            how to handle an error querying for one flag, one of

            - `'raise'` (default): raise the Exception
            - `'warn'`: print a warning
            - `'ignore'`: move onto the next flag as if nothing happened

        url : `str`, optional, default: ``'https://segments.ligo.org'``
            URL of the segment database.

        Returns
        -------
        flagdict : `DataQualityDict`
            An ordered `DataQualityDict` of (name, `DataQualityFlag`)
            pairs.
        """
        # check on_error flag
        on_error = kwargs.pop('on_error', 'raise').lower()
        if on_error not in ['raise', 'warn', 'ignore']:
            raise ValueError("on_error must be one of 'raise', 'warn', "
                             "or 'ignore'")

        # set up threading
        inq = Queue()
        outq = Queue()
        for i in range(len(flags)):
            t = _QueryDQSegDBThread(inq, outq, *args, **kwargs)
            t.setDaemon(True)
            t.start()
        for i, flag in enumerate(flags):
            inq.put((i, flag))

        # capture output
        inq.join()
        outq.join()
        new = cls()
        results = list(zip(*sorted([outq.get() for i in range(len(flags))],
                                   key=lambda x: x[0])))[1]
        for result, flag in zip(results, flags):
            if isinstance(result, Exception):
                result.args = ('%s [%s]' % (str(result), str(flag)),)
                if on_error == 'ignore':
                    pass
                elif on_error == 'warn':
                    warnings.warn(str(result))
                else:
                    raise result
            else:
                new[flag] = result
        return new
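
# query_dqsegdb() above tags each queued flag with its index and sorts the collected
# results so the output order matches the input order regardless of which thread
# finishes first. This is a self-contained sketch of that pattern with a stand-in work
# function; no gwpy or DQSegDB calls are involved.
from queue import Queue
from threading import Thread

def parallel_ordered(func, items, num_threads=4):
    items = list(items)
    inq, outq = Queue(), Queue()

    def worker():
        while True:
            i, item = inq.get()
            try:
                outq.put((i, func(item)))
            except Exception as exc:       # hand errors back instead of raising
                outq.put((i, exc))
            finally:
                inq.task_done()

    for _ in range(num_threads):
        Thread(target=worker, daemon=True).start()
    for i, item in enumerate(items):
        inq.put((i, item))
    inq.join()
    pairs = [outq.get() for _ in items]
    return [result for _, result in sorted(pairs, key=lambda p: p[0])]

print(parallel_ordered(lambda x: x ** 2, [3, 1, 2]))   # -> [9, 1, 4]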
コード例 #43
ファイル: MainWin.py プロジェクト: pythonthings/CrossMgr
class MainWin( wx.Frame ):
	def __init__( self, parent, id = wx.ID_ANY, title='', size=(1000,800) ):
		wx.Frame.__init__(self, parent, id, title, size=size)
		
		self.db = Database()
		
		self.bufferSecs = 10
		self.setFPS( 30 )
		self.xFinish = None
		
		self.tFrameCount = self.tLaunch = self.tLast = now()
		self.frameCount = 0
		self.fpt = timedelta(seconds=0)
		self.iTriggerSelect = None
		self.triggerInfo = None
		self.tsMax = None
		
		self.captureTimer = wx.CallLater( 10, self.stopCapture )
		
		self.tdCaptureBefore = tdCaptureBeforeDefault
		self.tdCaptureAfter = tdCaptureAfterDefault

		self.config = wx.Config()
		
		self.requestQ = Queue()		# Select photos from photobuf.
		self.dbWriterQ = Queue()	# Photos waiting to be written
		self.messageQ = Queue()		# Collection point for all status/failure messages.
		
		self.SetBackgroundColour( wx.Colour(232,232,232) )
		
		self.focusDialog = FocusDialog( self )
		self.photoDialog = PhotoDialog( self )
		self.autoCaptureDialog = AutoCaptureDialog( self )
		self.triggerDialog = TriggerDialog( self )
				
		mainSizer = wx.BoxSizer( wx.VERTICAL )
		
		#------------------------------------------------------------------------------------------------
		headerSizer = wx.BoxSizer( wx.HORIZONTAL )
		
		self.logo = Utils.GetPngBitmap('CrossMgrHeader.png')
		headerSizer.Add( wx.StaticBitmap(self, wx.ID_ANY, self.logo) )
		
		self.title = wx.StaticText(self, label='CrossMgr Video\nVersion {}'.format(AppVerName.split()[1]), style=wx.ALIGN_RIGHT )
		self.title.SetFont( wx.Font( (0,28), wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL ) )
		headerSizer.Add( self.title, flag=wx.ALL, border=10 )
		
		clock = Clock( self, size=(90,90) )
		clock.SetBackgroundColour( self.GetBackgroundColour() )
		clock.Start()

		headerSizer.Add( clock, flag=wx.ALIGN_CENTER_VERTICAL|wx.LEFT|wx.RIGHT, border=4 )
		
		#------------------------------------------------------------------------------
		self.cameraDevice = wx.StaticText( self )
		self.cameraResolution = wx.StaticText( self )
		self.targetFPS = wx.StaticText( self, label='30 fps' )
		self.actualFPS = wx.StaticText( self, label='30.0 fps' )
		
		boldFont = self.cameraDevice.GetFont()
		boldFont.SetWeight( wx.BOLD )
		for w in (self.cameraDevice, self.cameraResolution, self.targetFPS, self.actualFPS):
			w.SetFont( boldFont )
		
		fgs = wx.FlexGridSizer( 2, 2, 2 )	# 2 Cols
		fgs.Add( wx.StaticText(self, label='Camera Device:'), flag=wx.ALIGN_RIGHT )
		fgs.Add( self.cameraDevice )
		
		fgs.Add( wx.StaticText(self, label='Resolution:'), flag=wx.ALIGN_RIGHT )
		fgs.Add( self.cameraResolution )
		
		fgs.Add( wx.StaticText(self, label='Target:'), flag=wx.ALIGN_RIGHT )
		fgs.Add( self.targetFPS, flag=wx.ALIGN_RIGHT )
		
		fgs.Add( wx.StaticText(self, label='Actual:'), flag=wx.ALIGN_RIGHT )
		fgs.Add( self.actualFPS, flag=wx.ALIGN_RIGHT )
		
		self.focus = wx.Button( self, label="Focus..." )
		self.focus.Bind( wx.EVT_BUTTON, self.onFocus )
		
		self.reset = wx.Button( self, label="Reset Camera" )
		self.reset.Bind( wx.EVT_BUTTON, self.resetCamera )
		
		self.manage = wx.Button( self, label="Manage Database" )
		self.manage.Bind( wx.EVT_BUTTON, self.manageDatabase )
		
		self.autoCaptureBtn = wx.Button( self, label="Config Auto Capture" )
		self.autoCaptureBtn.Bind( wx.EVT_BUTTON, self.autoCaptureConfig )
		
		self.help = wx.Button( self, wx.ID_HELP )
		self.help.Bind( wx.EVT_BUTTON, self.onHelp )
		
		self.snapshot, self.autoCapture, self.capture = CreateCaptureButtons( self )
		
		self.snapshot.Bind( wx.EVT_LEFT_DOWN, self.onStartSnapshot )
		self.focusDialog.snapshot.Bind( wx.EVT_LEFT_DOWN, self.onStartSnapshot )
		self.autoCapture.Bind( wx.EVT_LEFT_DOWN, self.onStartAutoCapture )
		self.focusDialog.autoCapture.Bind( wx.EVT_LEFT_DOWN, self.onStartAutoCapture )
		self.capture.Bind( wx.EVT_LEFT_DOWN, self.onStartCapture )
		self.capture.Bind( wx.EVT_LEFT_UP, self.onStopCapture )
		self.focusDialog.capture.Bind( wx.EVT_LEFT_DOWN, self.onStartCapture )
		self.focusDialog.capture.Bind( wx.EVT_LEFT_UP, self.onStopCapture )
		
		headerSizer.Add( fgs, flag=wx.ALIGN_CENTER_VERTICAL )
		
		fgs = wx.FlexGridSizer( rows=2, cols=0, hgap=8, vgap=4 )
		
		fgs.Add( self.focus, flag=wx.EXPAND )
		fgs.Add( self.reset, flag=wx.EXPAND )
		fgs.Add( self.manage, flag=wx.EXPAND )
		fgs.Add( self.autoCaptureBtn, flag=wx.EXPAND )
		fgs.Add( self.help, flag=wx.EXPAND )
		
		headerSizer.Add( fgs, flag=wx.ALIGN_CENTRE|wx.LEFT, border=4 )
		headerSizer.AddStretchSpacer()
		
		headerSizer.Add( self.snapshot, flag=wx.ALIGN_CENTRE_VERTICAL|wx.LEFT, border=8 )
		headerSizer.Add( self.autoCapture, flag=wx.ALIGN_CENTRE_VERTICAL|wx.LEFT, border=8 )
		headerSizer.Add( self.capture, flag=wx.ALIGN_CENTRE_VERTICAL|wx.LEFT|wx.RIGHT, border=8 )

		#------------------------------------------------------------------------------
		mainSizer.Add( headerSizer, flag=wx.EXPAND )
		
		#------------------------------------------------------------------------------------------------
		self.finishStrip = FinishStripPanel( self, size=(-1,wx.GetDisplaySize()[1]//2) )
		self.finishStrip.finish.Bind( wx.EVT_RIGHT_DOWN, self.onRightClick )
		
		self.primaryBitmap = ScaledBitmap( self, style=wx.BORDER_SUNKEN, size=(int(imageWidth*0.75), int(imageHeight*0.75)) )
		self.primaryBitmap.SetTestBitmap()
		self.primaryBitmap.Bind( wx.EVT_LEFT_UP, self.onFocus )
		self.primaryBitmap.Bind( wx.EVT_RIGHT_UP, self.onFocus )
		
		hsDate = wx.BoxSizer( wx.HORIZONTAL )
		hsDate.Add( wx.StaticText(self, label='Show Triggers for'), flag=wx.ALIGN_CENTER_VERTICAL )
		tQuery = now()
		self.date = wx.adv.DatePickerCtrl(
			self,
			dt=wx.DateTime.FromDMY( tQuery.day, tQuery.month-1, tQuery.year ),
			style=wx.adv.DP_DROPDOWN|wx.adv.DP_SHOWCENTURY
		)
		self.date.Bind( wx.adv.EVT_DATE_CHANGED, self.onQueryDateChanged )
		hsDate.Add( self.date, flag=wx.LEFT, border=2 )
		
		self.dateSelect = wx.Button( self, label='Select Date' )
		hsDate.Add( self.dateSelect, flag=wx.ALIGN_CENTER_VERTICAL|wx.LEFT, border=2 )
		self.dateSelect.Bind( wx.EVT_BUTTON, self.onDateSelect )
		
		hsDate.Add( wx.StaticText(self, label='Filter by Bib'), flag=wx.ALIGN_CENTER_VERTICAL|wx.LEFT, border=12 )
		self.bib = wx.lib.intctrl.IntCtrl( self, style=wx.TE_PROCESS_ENTER, size=(64,-1), min=1, allow_none=True, value=None )
		self.bib.Bind( wx.EVT_TEXT_ENTER, self.onQueryBibChanged )
		hsDate.Add( self.bib, flag=wx.LEFT, border=2 )
		
		self.tsQueryLower = datetime(tQuery.year, tQuery.month, tQuery.day)
		self.tsQueryUpper = self.tsQueryLower + timedelta(days=1)
		self.bibQuery = None
		
		self.triggerList = AutoWidthListCtrl( self, style=wx.LC_REPORT|wx.BORDER_SUNKEN|wx.LC_SORT_ASCENDING|wx.LC_HRULES )
		
		self.il = wx.ImageList(16, 16)
		self.sm_close = []
		for bm in getCloseFinishBitmaps():
			self.sm_close.append( self.il.Add(bm) )
		self.sm_up = self.il.Add( Utils.GetPngBitmap('SmallUpArrow.png'))
		self.sm_dn = self.il.Add( Utils.GetPngBitmap('SmallDownArrow.png'))
		self.triggerList.SetImageList(self.il, wx.IMAGE_LIST_SMALL)
		
		self.fieldCol = {f:c for c, f in enumerate('ts bib name team wave race_name note kmh mph frames'.split())}
		headers = ['Time', 'Bib', 'Name', 'Team', 'Wave', 'Race', 'Note', 'km/h', 'mph', 'Frames']
		for i, h in enumerate(headers):
			self.triggerList.InsertColumn(
				i, h,
				wx.LIST_FORMAT_RIGHT if h in ('Bib','km/h','mph','Frames') else wx.LIST_FORMAT_LEFT
			)
		self.itemDataMap = {}
		
		self.triggerList.Bind( wx.EVT_LIST_ITEM_SELECTED, self.onTriggerSelected )
		self.triggerList.Bind( wx.EVT_LIST_ITEM_ACTIVATED, self.onTriggerEdit )
		self.triggerList.Bind( wx.EVT_LIST_ITEM_RIGHT_CLICK, self.onTriggerRightClick )
		#self.triggerList.Bind( wx.EVT_LIST_DELETE_ITEM, self.onTriggerDelete )
		
		vsTriggers = wx.BoxSizer( wx.VERTICAL )
		vsTriggers.Add( hsDate )
		vsTriggers.Add( self.triggerList, 1, flag=wx.EXPAND|wx.TOP, border=2)
		
		#------------------------------------------------------------------------------------------------
		mainSizer.Add( self.finishStrip, 1, flag=wx.EXPAND )
		
		border=2
		row1Sizer = wx.BoxSizer( wx.HORIZONTAL )
		row1Sizer.Add( self.primaryBitmap, flag=wx.ALL, border=border )
		row1Sizer.Add( vsTriggers, 1, flag=wx.TOP|wx.BOTTOM|wx.RIGHT|wx.EXPAND, border=border )
		mainSizer.Add( row1Sizer, flag=wx.EXPAND )
				
		self.Bind(wx.EVT_CLOSE, self.onCloseWindow)

		self.readOptions()
		self.updateFPS( int(float(self.targetFPS.GetLabel().split()[0])) )
		self.updateAutoCaptureLabel()
		self.SetSizerAndFit( mainSizer )
		
		# Start the message reporting thread so we can see what is going on.
		self.messageThread = threading.Thread( target=self.showMessages )
		self.messageThread.daemon = True
		self.messageThread.start()
		
		wx.CallLater( 300, self.refreshTriggers )
	
	def onHelp( self, event ):
		OpenHelp()
	
	def setFPS( self, fps ):
		self.fps = int(fps if fps > 0 else 30)
		self.frameDelay = 1.0 / self.fps
		self.frameCountUpdate = int(self.fps * 2)
	
	def updateFPS( self, fps ):
		self.setFPS( fps )
		self.targetFPS.SetLabel( u'{} fps'.format(self.fps) )

	def updateActualFPS( self, actualFPS ):
		self.actualFPS.SetLabel( '{:.1f} fps'.format(actualFPS) )

	def updateAutoCaptureLabel( self ):
		def f( n ):
			s = '{:0.1f}'.format( n )
			return s[:-2] if s.endswith('.0') else s
		
		label = u'\n'.join( [u'AUTO',u'CAPTURE',u'{} .. {}'.format(f(-self.tdCaptureBefore.total_seconds()), f(self.tdCaptureAfter.total_seconds()))] )
		for btn in (self.autoCapture, self.focusDialog.autoCapture):
			btn.SetLabel( label )
			btn.SetFontToFitLabel()
			wx.CallAfter( btn.Refresh )

	def setQueryDate( self, d ):
		self.tsQueryLower = d
		self.tsQueryUpper = self.tsQueryLower + timedelta( days=1 )
		self.refreshTriggers( True )
		
	def onDateSelect( self, event ):
		triggerDates = self.db.getTriggerDates()
		triggerDates.sort( reverse=True )
		with DateSelectDialog( self, triggerDates ) as dlg:
			if dlg.ShowModal() == wx.ID_OK and dlg.GetDate():
				self.setQueryDate( dlg.GetDate() )
			
	def onQueryDateChanged( self, event ):
		v = self.date.GetValue()
		self.setQueryDate( datetime( v.GetYear(), v.GetMonth() + 1, v.GetDay() ) )
	
	def onQueryBibChanged( self, event ):
		self.bibQuery = self.bib.GetValue()
		self.refreshTriggers( True )
	
	def GetListCtrl( self ):
		return self.triggerList
	
	def GetSortImages(self):
		return (self.sm_dn, self.sm_up)
	
	def getItemData( self, i ):
		data = self.triggerList.GetItemData( i )
		return self.itemDataMap[data]
	
	def getTriggerRowFromID( self, id ):
		for row in six.moves.range(self.triggerList.GetItemCount()-1, -1, -1):
			if self.itemDataMap[row][0] == id:
				return row
		return None

	def updateTriggerRow( self, row, fields ):
		if 'last_name' in fields and 'first_name' in fields:
			fields['name'] = u', '.join( n for n in (fields['last_name'], fields['first_name']) if n )
		for k, v in six.iteritems(fields):
			if k in self.fieldCol:
				if k == 'bib':
					v = u'{:>6}'.format(v)
				elif k == 'frames':
					v = six.text_type(v) if v else u''
				else:
					v = six.text_type(v)
				self.triggerList.SetItem( row, self.fieldCol[k], v )
				
	def updateTriggerRowID( self, id, fields ):
		row = self.getTriggerRowFromID( id )
		if row is not None:
			self.updateTriggerRow( row, fields )
	
	def getTriggerInfo( self, row ):
		data = self.itemDataMap[self.triggerList.GetItemData(row)]
		return {
			a:data[i] for i, a in enumerate((
				'id','ts','s_before','s_after','ts_start',
				'bib','name','team','wave','race_name',
				'first_name','last_name','note','kmh','frames'))
		}
	
	def refreshTriggers( self, replace=False, iTriggerRow=None ):
		tNow = now()
		self.lastTriggerRefresh = tNow
		
		# replace = True
		if replace:
			tsLower = self.tsQueryLower
			tsUpper = self.tsQueryUpper
			self.triggerList.DeleteAllItems()
			self.itemDataMap = {}
			self.tsMax = None
			self.iTriggerSelect = None
			self.triggerInfo = {}
			self.finishStrip.SetTsJpgs( None, None )
		else:
			tsLower = (self.tsMax or datetime(tNow.year, tNow.month, tNow.day)) + timedelta(seconds=0.00001)
			tsUpper = tsLower + timedelta(days=1)

		triggers = self.db.getTriggers( tsLower, tsUpper, self.bibQuery )
			
		tsPrev = (self.tsMax or datetime(2000,1,1))
		if triggers:
			self.tsMax = triggers[-1][1] # id,ts,s_before,s_after,ts_start,bib,first_name,last_name,team,wave,race_name,note,kmh,frames
		
		zeroFrames, tsLower, tsUpper = [], datetime.max, datetime.min
		for i, (id,ts,s_before,s_after,ts_start,bib,first_name,last_name,team,wave,race_name,note,kmh,frames) in enumerate(triggers):
			if s_before == 0.0 and s_after == 0.0:
				s_before,s_after = tdCaptureBeforeDefault.total_seconds(),tdCaptureAfterDefault.total_seconds()
			
			dtFinish = (ts-tsPrev).total_seconds()
			itemImage = self.sm_close[min(len(self.sm_close)-1, int(len(self.sm_close) * dtFinish / closeFinishThreshold))]		
			row = self.triggerList.InsertItem( 999999, ts.strftime('%H:%M:%S.%f')[:-3], itemImage )
			
			if not frames:
				tsLower = min( tsLower, ts-timedelta(seconds=s_before) )
				tsU = ts + timedelta(seconds=s_after)
				tsUpper = max( tsUpper,tsU )
				zeroFrames.append( (row, id, tsU) )
			
			kmh_text, mph_text = (u'{:.2f}'.format(kmh), u'{:.2f}'.format(kmh * 0.621371)) if kmh else (u'', u'')
			fields = {
				'bib':			bib,
				'last_name':	last_name,
				'first_name':	first_name,
				'team':			team,
				'wave':			wave,
				'race_name':	race_name,
				'note':			note,
				'kmh':			kmh_text,
				'mph':			mph_text,
				'frames':		frames,
			}
			self.updateTriggerRow( row, fields )
			
			self.triggerList.SetItemData( row, row )
			self.itemDataMap[row] = (id,ts,s_before,s_after,ts_start,bib,fields['name'],team,wave,race_name,first_name,last_name,note,kmh,frames)
			tsPrev = ts
		
		if zeroFrames:
			counts = self.db.getTriggerPhotoCounts( tsLower, tsUpper )
			values = {'frames':0}
			for row, id, tsU in zeroFrames:
				values['frames'] = counts[id]
				self.updateTriggerRow( row, values )
				# Don't update the trigger if the number of frames is possibly not known yet.
				if (tNow - tsU).total_seconds() < 5.0*60.0:
					del counts[id]
			self.db.updateTriggerPhotoCounts( counts )
			
		for i in six.moves.range(self.triggerList.GetColumnCount()):
			self.triggerList.SetColumnWidth(i, wx.LIST_AUTOSIZE)

		if iTriggerRow is not None:
			iTriggerRow = min( max(0, iTriggerRow), self.triggerList.GetItemCount()-1 )
			self.triggerList.EnsureVisible( iTriggerRow )
			self.triggerList.Select( iTriggerRow )
		else:
			if self.triggerList.GetItemCount() >= 1:
				self.triggerList.EnsureVisible( self.triggerList.GetItemCount()-1 )

	def Start( self ):
		self.messageQ.put( ('', '************************************************') )
		self.messageQ.put( ('started', now().strftime('%Y/%m/%d %H:%M:%S')) )
		self.startThreads()

	def updateSnapshot( self, t, f ):
		self.snapshotCount = getattr(self, 'snapshotCount', 0) + 1
		self.dbWriterQ.put( ('photo', t, f) )
		self.dbWriterQ.put( (
			'trigger',
			t,
			0.00001,		# s_before
			0.00001,		# s_after
			t,
			self.snapshotCount,	# bib
			u'', 			# first_name
			u'Snapshot',	# last_name
			u'',			# team
			u'',			# wave
			u'',			# race_name
		) )
		self.doUpdateAutoCapture( t, self.snapshotCount, [self.snapshot, self.focusDialog.snapshot], snapshotEnableColour )
		
	def onStartSnapshot( self, event ):
		event.GetEventObject().SetForegroundColour( snapshotDisableColour )
		wx.CallAfter( event.GetEventObject().Refresh )
		self.camInQ.put( {'cmd':'snapshot'} )
		
	def doUpdateAutoCapture( self, tStartCapture, count, btn, colour ):
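		# Flush pending database writes, locate the trigger just written for this capture, attach its photo data and select it in the list, then restore the button colour(s).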
		self.dbWriterQ.put( ('flush',) )
		self.dbWriterQ.join()
		triggers = self.db.getTriggers( tStartCapture, tStartCapture, count )
		if triggers:
			id = triggers[0][0]
			self.db.initCaptureTriggerData( id )
			self.refreshTriggers( iTriggerRow=999999, replace=True )
			self.showLastTrigger()
			self.onTriggerSelected( iTriggerSelect=self.triggerList.GetItemCount()-1 )
		for b in (btn if isinstance(btn, list) else [btn]):
			b.SetForegroundColour( colour )
			wx.CallAfter( b.Refresh )

	def onStartAutoCapture( self, event ):
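		# Record an 'Auto' trigger for the configured before/after window; processRequests issues the frame query, and the UI is updated once the capture window has elapsed.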
		tNow = now()
		
		event.GetEventObject().SetForegroundColour( autoCaptureDisableColour )
		wx.CallAfter( event.GetEventObject().Refresh )
		
		self.autoCaptureCount = getattr(self, 'autoCaptureCount', 0) + 1
		s_before, s_after = self.tdCaptureBefore.total_seconds(), self.tdCaptureAfter.total_seconds()
		self.requestQ.put( {
				'time':tNow,
				's_before':s_before,
				's_after':s_after,
				'ts_start':tNow,
				'bib':self.autoCaptureCount,
				'last_name':u'Auto',
			}
		)
		
		wx.CallLater( int(CamServer.EstimateQuerySeconds(tNow, s_before, s_after, self.fps)*1000.0) + 80,
			self.doUpdateAutoCapture, tNow, self.autoCaptureCount, self.autoCapture, autoCaptureEnableColour
		)
		
	def onStartCapture( self, event ):
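		# Begin an open-ended capture: record a 'Capture' trigger and tell the camera server to start capturing from tNow - tdCaptureBefore; onStopCapture closes the interval.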
		tNow = self.tStartCapture = now()
		
		event.GetEventObject().SetForegroundColour( captureDisableColour )
		wx.CallAfter( event.GetEventObject().Refresh )
		wx.BeginBusyCursor()
		
		self.captureCount = getattr(self, 'captureCount', 0) + 1
		self.requestQ.put( {
				'time':tNow,
				's_before':0.0,
				's_after':self.tdCaptureAfter.total_seconds(),
				'ts_start':tNow,
				'bib':self.captureCount,
				'last_name':u'Capture',
			}
		)
		self.camInQ.put( {'cmd':'start_capture', 'tStart':tNow-self.tdCaptureBefore} )
	
	def showLastTrigger( self ):
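		# Scroll to the most recent trigger and make it the only selected row.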
		iTriggerRow = self.triggerList.GetItemCount() - 1
		if iTriggerRow < 0:
			return
		self.triggerList.EnsureVisible( iTriggerRow )
		for r in six.moves.range(self.triggerList.GetItemCount()-1):
			self.triggerList.Select(r, 0)
		self.triggerList.Select( iTriggerRow )		
	
	def onStopCapture( self, event ):
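		# Stop the camera capture, set the trigger's after-duration to the actual elapsed time, and refresh the finish strip once all photos have been written.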
		self.camInQ.put( {'cmd':'stop_capture'} )
		triggers = self.db.getTriggers( self.tStartCapture, self.tStartCapture, self.captureCount )
		if triggers:
			id = triggers[0][0]
			self.db.updateTriggerBeforeAfter(
				id,
				0.0,
				(now() - self.tStartCapture).total_seconds()
			)
			self.db.initCaptureTriggerData( id )
			self.refreshTriggers( iTriggerRow=999999, replace=True )
		
		self.showLastTrigger()
		
		wx.EndBusyCursor()
		event.GetEventObject().SetForegroundColour( captureEnableColour )
		wx.CallAfter( event.GetEventObject().Refresh )
		
		def updateFS():
			# Wait for all the photos to be written.
			self.dbWriterQ.put( ('flush',) )
			self.dbWriterQ.join()
			# Update the finish strip.
			wx.CallAfter( self.onTriggerSelected, iTriggerSelect=self.triggerList.GetItemCount() - 1 )

		threading.Thread( target=updateFS ).start()

	def autoCaptureConfig( self, event ):
		self.autoCaptureDialog.set( self.tdCaptureBefore.total_seconds(), self.tdCaptureAfter.total_seconds() )
		if self.autoCaptureDialog.ShowModal() == wx.ID_OK:
			s_before, s_after = self.autoCaptureDialog.get()
			self.tdCaptureBefore = timedelta(seconds=s_before) if s_before is not None else tdCaptureBeforeDefault
			self.tdCaptureAfter  = timedelta(seconds=s_after)  if s_after  is not None else tdCaptureAfterDefault
			self.writeOptions()
			self.updateAutoCaptureLabel()
 		
	def onFocus( self, event ):
		if self.focusDialog.IsShown():
			return
		self.focusDialog.Move((4,4))
		self.camInQ.put( {'cmd':'send_update', 'name':'focus', 'freq':1} )
		self.focusDialog.Show()
	
	def onRightClick( self, event ):
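		# Open the photo dialog for the frame under the cursor; persist any speed (km/h) change back to the trigger.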
		if not self.triggerInfo:
			return
		
		self.xFinish = event.GetX()
		self.photoDialog.set( self.finishStrip.finish.getIJpg(self.xFinish), self.triggerInfo, self.finishStrip.GetTsJpgs(), self.fps,
			self.doTriggerEdit
		)
		self.photoDialog.CenterOnParent()
		self.photoDialog.Move( self.photoDialog.GetScreenPosition().x, 0 )
		self.photoDialog.ShowModal()
		if self.triggerInfo['kmh'] != (self.photoDialog.kmh or 0.0):
			self.db.updateTriggerKMH( self.triggerInfo['id'], self.photoDialog.kmh or 0.0 )
			self.refreshTriggers( replace=True, iTriggerRow=self.iTriggerSelect )
		self.photoDialog.clear()

	def onTriggerSelected( self, event=None, iTriggerSelect=None ):
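		# Load the photos for the selected trigger in a background thread and hand them to the finish strip.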
		self.iTriggerSelect = event.Index if iTriggerSelect is None else iTriggerSelect
		if self.iTriggerSelect >= self.triggerList.GetItemCount():
			self.ts = None
			self.tsJpg = []
			self.finishStrip.SetTsJpgs( self.tsJpg, self.ts, {} )
			return
		
		data = self.itemDataMap[self.triggerList.GetItemData(self.iTriggerSelect)]
		self.triggerInfo = self.getTriggerInfo( self.iTriggerSelect )
		self.ts = self.triggerInfo['ts']
		s_before, s_after = abs(self.triggerInfo['s_before']), abs(self.triggerInfo['s_after'])
		if s_before == 0.0 and s_after == 0.0:
			s_before, s_after = tdCaptureBeforeDefault.total_seconds(), tdCaptureAfterDefault.total_seconds()
		
		# Update the screen in the background so we don't freeze the UI.
		def updateFS( triggerInfo ):
			self.ts = triggerInfo['ts']
			self.tsJpg = self.db.clone().getPhotos( self.ts - timedelta(seconds=s_before), self.ts + timedelta(seconds=s_after) )
			triggerInfo['frames'] = len(self.tsJpg)
			wx.CallAfter( self.finishStrip.SetTsJpgs, self.tsJpg, self.ts, triggerInfo )
			
		threading.Thread( target=updateFS, args=(self.triggerInfo,) ).start()
	
	def onTriggerRightClick( self, event ):
		self.iTriggerSelect = event.Index
		if not hasattr(self, "triggerDeleteID"):
			self.triggerDeleteID = wx.NewId()
			self.triggerEditID = wx.NewId()
			self.Bind(wx.EVT_MENU, lambda event: self.doTriggerDelete(), id=self.triggerDeleteID)
			self.Bind(wx.EVT_MENU, lambda event: self.doTriggerEdit(),   id=self.triggerEditID)

		menu = wx.Menu()
		menu.Append(self.triggerEditID,   "Edit...")
		menu.Append(self.triggerDeleteID, "Delete...")

		self.PopupMenu(menu)
		menu.Destroy()
		
	def doTriggerDelete( self, confirm=True ):
		triggerInfo = self.getTriggerInfo( self.iTriggerSelect )
		message = u', '.join( f for f in (triggerInfo['ts'].strftime('%H:%M:%S.%f')[:-3], six.text_type(triggerInfo['bib']),
			triggerInfo['name'], triggerInfo['team'], triggerInfo['wave'], triggerInfo['race_name']) if f )
		if not confirm or wx.MessageDialog( self, u'{}:\n\n{}'.format(u'Confirm Delete', message), u'Confirm Delete',
				style=wx.OK|wx.CANCEL|wx.ICON_QUESTION ).ShowModal() == wx.ID_OK:		
			self.db.deleteTrigger( triggerInfo['id'], self.tdCaptureBefore.total_seconds(), self.tdCaptureAfter.total_seconds() )
			self.refreshTriggers( replace=True, iTriggerRow=self.iTriggerSelect )
	
	def onTriggerDelete( self, event ):
		self.iTriggerSelect = event.Index
		self.doTriggerDelete()
		
	def doTriggerEdit( self ):
		data = self.itemDataMap[self.triggerList.GetItemData(self.iTriggerSelect)]
		self.triggerDialog.set( self.db, data[0] )
		self.triggerDialog.CenterOnParent()
		if self.triggerDialog.ShowModal() == wx.ID_OK:
			row = self.iTriggerSelect
			fields = {f:v for f, v in zip(Database.triggerEditFields,self.triggerDialog.get())}
			self.updateTriggerRow( row, fields )
			self.triggerInfo.update( fields )
		return self.triggerInfo
	
	def onTriggerEdit( self, event ):
		self.iTriggerSelect = event.Index
		self.doTriggerEdit()
	
	def showMessages( self ):
		while 1:
			message = self.messageQ.get()
			assert len(message) == 2, 'Incorrect message length'
			cmd, info = message
			six.print_( 'Message:', '{}:  {}'.format(cmd, info) if cmd else info )
			#wx.CallAfter( self.messageManager.write, '{}:  {}'.format(cmd, info) if cmd else info )
	
	def delayRefreshTriggers( self ):
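		# Coalesce bursts of update notifications into a single deferred refresh.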
		if not hasattr(self, 'refreshTimer') or not self.refreshTimer.IsRunning():
			self.refreshTimer = wx.CallLater( 1000, self.refreshTriggers )

	def startThreads( self ):
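		# Launch the worker threads: the socket listener (external trigger requests), the camera reader, the request processor and the database writer.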
		self.grabFrameOK = False
		
		self.listenerThread = SocketListener( self.requestQ, self.messageQ )
		error = self.listenerThread.test()
		if error:
			wx.MessageBox('Socket Error:\n\n{}\n\nIs another CrossMgrVideo or CrossMgrCamera running on this computer?'.format(error),
				"Socket Error",
				wx.OK | wx.ICON_ERROR
			)
			wx.Exit()
		
		self.camInQ, self.camReader = CamServer.getCamServer( self.getCameraInfo() )
		self.cameraThread = threading.Thread( target=self.processCamera )
		self.cameraThread.daemon = True
		
		self.eventThread = threading.Thread( target=self.processRequests )
		self.eventThread.daemon = True
		
		self.dbWriterThread = threading.Thread( target=DBWriter, args=(self.dbWriterQ, lambda: wx.CallAfter(self.delayRefreshTriggers), self.db.fname) )
		self.dbWriterThread.daemon = True
		
		self.cameraThread.start()
		self.eventThread.start()
		self.dbWriterThread.start()
		self.listenerThread.start()
		
		self.grabFrameOK = True
		self.messageQ.put( ('threads', 'Successfully Launched') )
		
		self.primaryFreq = 5
		self.camInQ.put( {'cmd':'send_update', 'name':'primary', 'freq':self.primaryFreq} )
		return True
	
	def stopCapture( self ):
		self.dbWriterQ.put( ('flush',) )
	
	def processCamera( self ):
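		# Consume messages from the camera server: store returned frames, update the live preview and focus window, and dispatch snapshots, until a 'terminate' message arrives or the pipe closes.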
		lastFrame = None
		lastPrimaryTime = now()
		primaryCount = 0
		while 1:
			try:
				msg = self.camReader.recv()
			except EOFError:
				break
			
			cmd = msg['cmd']
			if cmd == 'response':
				for t, f in msg['ts_frames']:
					self.dbWriterQ.put( ('photo', t, f) )
					lastFrame = f
			elif cmd == 'update':
				name, lastFrame = msg['name'], lastFrame if msg['frame'] is None else msg['frame']
				if lastFrame is not None:
					if name == 'primary':
						wx.CallAfter( self.primaryBitmap.SetBitmap, CVUtil.frameToBitmap(lastFrame) )
						
						primaryCount += self.primaryFreq
						primaryTime = now()
						primaryDelta = (primaryTime - lastPrimaryTime).total_seconds()
						if primaryDelta > 2.5:
							wx.CallAfter( self.updateActualFPS, primaryCount / primaryDelta )
							lastPrimaryTime = primaryTime
							primaryCount = 0
					elif name == 'focus':
						if self.focusDialog.IsShown():
							wx.CallAfter( self.focusDialog.SetBitmap, CVUtil.frameToBitmap(lastFrame) )
						else:
							self.camInQ.put( {'cmd':'cancel_update', 'name':'focus'} )
			elif cmd == 'snapshot':
				lastFrame = lastFrame if msg['frame'] is None else msg['frame']
				wx.CallAfter( self.updateSnapshot,  msg['ts'], lastFrame )
			elif cmd == 'terminate':
				break
		
	def processRequests( self ):
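		# Handle queued trigger requests: write the trigger record, ask the camera server for frames in the capture window, and flush the database once the window has passed.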
		def refresh():
			self.dbWriterQ.put( ('flush',) )
	
		while 1:
			msg = self.requestQ.get()
			
			tSearch = msg['time']
			advanceSeconds = msg.get('advanceSeconds', 0.0)
			tSearch += timedelta(seconds=advanceSeconds)
			
			# Record this trigger.
			self.dbWriterQ.put( (
				'trigger',
				tSearch - timedelta(seconds=advanceSeconds),
				msg.get('s_before', self.tdCaptureBefore.total_seconds()),	# Use the configured capture interval, not the default.
				msg.get('s_after', self.tdCaptureAfter.total_seconds()),
				msg.get('ts_start', None) or now(),
				msg.get('bib', 99999),
				msg.get('first_name',u'') or msg.get('firstName',u''),
				msg.get('last_name',u'') or msg.get('lastName',u''),
				msg.get('team',u''),
				msg.get('wave',u''),
				msg.get('race_name',u'') or msg.get('raceName',u''),
			) )
			# Record the video frames for the trigger.
			tStart, tEnd = tSearch-self.tdCaptureBefore, tSearch+self.tdCaptureAfter
			self.camInQ.put( { 'cmd':'query', 'tStart':tStart, 'tEnd':tEnd,} )
			wx.CallAfter( wx.CallLater, max(100, int(100+1000*(tEnd-now()).total_seconds())), refresh )
	
	def shutdown( self ):
		# Ensure that all images in the queue are saved.
		if hasattr(self, 'dbWriterThread'):
			self.camInQ.put( {'cmd':'terminate'} )
			self.dbWriterQ.put( ('terminate', ) )
			self.dbWriterThread.join( 2.0 )
			
	def setDBName( self, dbName ):
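		# Switch to a different database file: stop the current writer thread, open the new database (falling back to the default on error), and restart the writer.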
		if dbName != self.db.fname:
			if hasattr(self, 'dbWriterThread'):
				self.dbWriterQ.put( ('terminate', ) )
				self.dbWriterThread.join()
			try:
				self.db = Database( dbName )
			except:
				self.db = Database()
			
			self.dbWriterQ = Queue()
			self.dbWriterThread = threading.Thread( target=DBWriter, args=(self.dbWriterQ, lambda: wx.CallAfter(self.delayRefreshTriggers), self.db.fname) )
			self.dbWriterThread.daemon = True
			self.dbWriterThread.start()
	
	def resetCamera( self, event=None ):
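		# Show the camera configuration dialog and, if accepted, push the new device, resolution and frame rate to the camera server.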
		dlg = ConfigDialog( self, self.getCameraDeviceNum(), self.fps, self.getCameraResolution() )
		ret = dlg.ShowModal()
		cameraDeviceNum = dlg.GetCameraDeviceNum()
		cameraResolution = dlg.GetCameraResolution()
		fps = dlg.GetFPS()
		dlg.Destroy()
		if ret != wx.ID_OK:
			return False
		
		self.setCameraDeviceNum( cameraDeviceNum )
		self.setCameraResolution( *cameraResolution )
		self.updateFPS( fps )
		self.writeOptions()
		
		if hasattr(self, 'camInQ'):
			self.camInQ.put( {'cmd':'cam_info', 'info':self.getCameraInfo(),} )
			
		self.GetSizer().Layout()
		return True
	
	def manageDatabase( self, event ):
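		# Show the database management dialog; optionally switch database files, delete data outside the chosen timestamp range, and vacuum.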
		trigFirst, trigLast = self.db.getTimestampRange()
		dlg = ManageDatabase( self, self.db.getsize(), self.db.fname, trigFirst, trigLast, title='Manage Database' )
		if dlg.ShowModal() == wx.ID_OK:
			work = wx.BusyCursor()
			tsLower, tsUpper, vacuum, dbName = dlg.GetValues()
			self.setDBName( dbName )
			if tsUpper:
				tsUpper = datetime.combine( tsUpper, time(23,59,59,999999) )
			self.db.cleanBetween( tsLower, tsUpper )
			if vacuum:
				self.db.vacuum()
			wx.CallAfter( self.finishStrip.Clear )
			wx.CallAfter( self.refreshTriggers, True )
		dlg.Destroy()
	
	def setCameraDeviceNum( self, num ):
		self.cameraDevice.SetLabel( six.text_type(num) )
		
	def setCameraResolution( self, width, height ):
		self.cameraResolution.SetLabel( u'{}x{}'.format(width, height) )
			
	def getCameraDeviceNum( self ):
		return int(self.cameraDevice.GetLabel())
		
	def getCameraFPS( self ):
		return int(float(self.targetFPS.GetLabel().split()[0]))
		
	def getCameraResolution( self ):
		try:
			resolution = [int(v) for v in self.cameraResolution.GetLabel().split('x')]
			return resolution[0], resolution[1]
		except:
			return 640, 480
		
	def onCloseWindow( self, event ):
		self.shutdown()
		wx.Exit()
		
	def writeOptions( self ):
		self.config.Write( 'DBName', self.db.fname )
		self.config.Write( 'CameraDevice', self.cameraDevice.GetLabel() )
		self.config.Write( 'CameraResolution', self.cameraResolution.GetLabel() )
		self.config.Write( 'FPS', self.targetFPS.GetLabel() )
		self.config.Write( 'SecondsBefore', '{:.3f}'.format(self.tdCaptureBefore.total_seconds()) )
		self.config.Write( 'SecondsAfter', '{:.3f}'.format(self.tdCaptureAfter.total_seconds()) )
		self.config.Flush()
	
	def readOptions( self ):
		self.setDBName( self.config.Read('DBName', '') )
		self.cameraDevice.SetLabel( self.config.Read('CameraDevice', u'0') )
		self.cameraResolution.SetLabel( self.config.Read('CameraResolution', u'640x480') )
		self.targetFPS.SetLabel( self.config.Read('FPS', u'30.000') )
		s_before = self.config.Read('SecondsBefore', u'0.5')
		s_after = self.config.Read('SecondsAfter', u'2.0')
		try:
			self.tdCaptureBefore = timedelta(seconds=abs(float(s_before)))
		except:
			pass
		try:
			self.tdCaptureAfter = timedelta(seconds=abs(float(s_after)))
		except:
			pass
		
	def getCameraInfo( self ):
		width, height = self.getCameraResolution()
		return {'usb':self.getCameraDeviceNum(), 'fps':self.getCameraFPS(), 'width':width, 'height':height}