Example No. 1
def test_inceptionresnetv2_notop():
    def target(queue):
        model = applications.InceptionResNetV2(weights=None, include_top=False)
        queue.put(model.output_shape)

    global_image_data_format = K.image_data_format()
    queue = Queue()

    K.set_image_data_format('channels_first')
    p = Process(target=target, args=(queue,))
    p.start()
    p.join()
    K.set_image_data_format(global_image_data_format)
    assert not queue.empty(), 'Model creation failed.'
    model_output_shape = queue.get_nowait()
    assert model_output_shape == (None, 1536, None, None)

    K.set_image_data_format('channels_last')
    p = Process(target=target, args=(queue,))
    p.start()
    p.join()
    K.set_image_data_format(global_image_data_format)
    assert not queue.empty(), 'Model creation failed.'
    model_output_shape = queue.get_nowait()
    assert model_output_shape == (None, None, None, 1536)
Example No. 2
class CommandQueue():

    _STATE_NORMAL = 'normal'
    _STATE_FLUSHING = 'flushing'
    _STATE_FINISHED = 'finished'

    def __init__(self):
        self._in = Queue()
        self._out = Queue()
        self._state = self._STATE_NORMAL
        self._state_lock = Lock()
        self._terminating_commands = (
                'notify_kunquat_exception', 'notify_libkunquat_error')

    def update(self):
        in_count = self._in.qsize()
        with self._state_lock:
            if self._state == self._STATE_FLUSHING:
                get_counter = repeat(True)
            else:
                get_counter = range(in_count)

        for _ in get_counter:
            try:
                command_data = self._in.get_nowait()
            except Empty:
                return

            command, _ = command_data
            if command in self._terminating_commands:
                # Make sure we won't block the UI before the terminating command is sent
                with self._state_lock:
                    self._state = self._STATE_FLUSHING

            self._out.put(command_data)

    def put(self, command, *args):
        with self._state_lock:
            is_state_normal = (self._state == self._STATE_NORMAL)
        if is_state_normal:
            self._in.put((command, args))

    def get(self):
        command_data = self._out.get_nowait()
        command, _ = command_data
        if command in self._terminating_commands:
            with self._state_lock:
                self._state = self._STATE_FINISHED
        return command_data

    def get_command_count(self):
        return self._out.qsize()
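
A brief usage sketch (the command names are hypothetical; the short sleeps only give multiprocessing.Queue's feeder thread time to flush, since delivery is asynchronous even within one process):

import time

cq = CommandQueue()
cq.put('set_volume', 0.5)
cq.put('notify_libkunquat_error', 'details')   # a terminating command flips the state to flushing
time.sleep(0.1)                                # let the feeder thread flush the inbound queue
cq.update()                                    # move pending commands to the outbound queue
time.sleep(0.1)
while cq.get_command_count() > 0:
    command, args = cq.get()                   # ('set_volume', (0.5,)), then the terminating command
    print(command, args)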
Example No. 3
def stress_test(addr, port, filename, totalcount, processes):
    from multiprocessing import Process, Queue
    import Queue as ThQueue
    qfrom = Queue()
    qto = Queue()

    for i in xrange(0, totalcount):
        qfrom.put((i, addr, port, filename))
    p = [Process(target=stressing_tester, args=(qfrom, qto)) for i in range(0, processes)]
    map(lambda x: x.start(), p)
    map(lambda x: x.join(), p)
    try:
        while True:
            print qto.get_nowait()
    except ThQueue.Empty:
        pass
Example No. 4
class Webserver():
    def __init__(self):
        TMP_BC = "/tmp/buildchimp_web"
        self.q_process_to_main = Queue()
        self.basepath = TMP_BC
        self.process = None
        self.pngPath = TMP_BC + "/screenshot.png"
        self.serving_string = False

    def __del__(self):
        self.stop()

    def __str__(self):
        if not self.serving_string:
            try:
                self.serving_string = self.q_process_to_main.get_nowait()
            except: pass
        return self.serving_string if self.serving_string else ""

    def start(self):
        self_copy = copy.copy(self)
        self.process = Process(target=serve, args=(self_copy, self.q_process_to_main))
        self.process.start()

    def stop(self):
        if self.process:
            self.process.terminate()

    def wait_on_msg(self, timeout=10):
        try:
            got = self.q_process_to_main.get(block=True, timeout=timeout)
            return got
        except:
            return None
Example No. 5
    def instantiate(self, stream=None):
        """ Start a local worker process

        Blocks until the process is up and the center is properly informed
        """
        if self.process and self.process.is_alive():
            raise ValueError("Existing process still alive. Please kill first")
        q = Queue()
        self.process = Process(target=run_worker,
                               args=(q, self.ip, self.center.ip,
                                     self.center.port, self.ncores,
                                     self.port, self._given_worker_port,
                                     self.local_dir, self.services, self.name))
        self.process.daemon = True
        self.process.start()
        while True:
            try:
                msg = q.get_nowait()
                if isinstance(msg, Exception):
                    raise msg
                self.worker_port = msg['port']
                assert self.worker_port
                self.worker_dir = msg['dir']
                break
            except queues.Empty:
                yield gen.sleep(0.1)
        logger.info("Nanny %s:%d starts worker process %s:%d",
                    self.ip, self.port, self.ip, self.worker_port)
        q.close()
        raise gen.Return('OK')
Example No. 6
class PPool(object):
    def __init__(self, func, single=False):
        self.func = func
        self.processes = []
        self.single = single
        self.queue = Queue()

    def submit(self, *args, **kwargs):
        kwargs["queue"] = self.queue
        p = Process(target=self.func, args=args, kwargs=kwargs)
        self.processes.append(p)
        p.start()
        if self.single:
            p.join()

    def __enter__(self):
        return self

    def __exit__(self, x, y, z):
        for p in self.processes:
            p.join()
        log = ''
        while not self.queue.empty():
            l = self.queue.get_nowait()
            log += l
            logging.error(l)
        if "Critical error!" in log:
            exit(1)
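
A usage sketch: the crawl worker below is hypothetical, PPool injects the shared queue as the queue keyword argument, and on platforms using the spawn start method this needs the usual __main__ guard.

def crawl(url, queue=None):
    # Hypothetical worker: push a log line back to the parent through the shared queue.
    queue.put("fetched %s\n" % url)

with PPool(crawl) as pool:
    for url in ("http://example.org/a", "http://example.org/b"):
        pool.submit(url)
# __exit__ joins the workers, logs every queued line via logging.error,
# and calls exit(1) only if some line contains "Critical error!".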
Example No. 7
    def instantiate(self, stream=None, environment=None):
        """ Start a local worker process

        Blocks until the process is up and the scheduler is properly informed
        """
        if environment:
            if not os.path.isabs(environment):
                environment = os.path.join(self.local_dir, environment)
            self.environment = environment

        with log_errors():
            if self.process and isalive(self.process):
                raise ValueError("Existing process still alive. Please kill first")

            if self.environment != nanny_environment:
                with tmpfile() as fn:
                    self.process = run_worker_subprocess(self.environment, self.ip,
                            self.scheduler.ip, self.scheduler.port, self.ncores,
                            self.port, self._given_worker_port, self.name,
                            self.memory_limit, self.loop, fn, self.quiet)

                    while not os.path.exists(fn):
                        yield gen.sleep(0.01)

                    while True:
                        try:
                            with open(fn) as f:
                                msg = json.load(f)
                            self.worker_port = msg['port']
                            self.worker_dir = msg['local_directory']
                            break
                        except JSONDecodeError:
                            yield gen.sleep(0.01)
            else:
                q = Queue()
                self.process = Process(target=run_worker_fork,
                                       args=(q, self.ip, self.scheduler.ip,
                                             self.scheduler.port, self.ncores,
                                             self.port, self._given_worker_port,
                                             self.local_dir, self.services, self.name,
                                             self.memory_limit))
                self.process.daemon = True
                self.process.start()
                while True:
                    try:
                        msg = q.get_nowait()
                        if isinstance(msg, Exception):
                            raise msg
                        self.worker_port = msg['port']
                        self.worker_dir = msg['dir']
                        assert self.worker_port
                        break
                    except queues.Empty:
                        yield gen.sleep(0.1)



            logger.info("Nanny %s:%d starts worker process %s:%d",
                        self.ip, self.port, self.ip, self.worker_port)
            raise gen.Return('OK')
Example No. 8
File: Quu.py Project: vergiliu/ps
class Quu(): #Singleton
    def __init__(self):
        self.queue = Queue()
        logger.debug('new queue')

    def addFolders(self, aLeftFolder, aRightFolder):
        logger.debug('adding new item in the quu')
        myComparator = FolderComparator(aLeftFolder, aRightFolder)
        myComparator.setSyncType("keepboth")
        self.queue.put(myComparator)

    def getQuu(self):
        return self.queue

    def getNext(self):
        """
        @return FolderComparator the folder
        """
        try:
            return self.queue.get_nowait()
        except BaseException:
            return None

    def getSize(self):
        return self.queue.qsize()
Example No. 9
    def execute_action(self, action):
        event = Event()
        queue = Queue()
        proc = Process(
            target=execute_action_proc,
            args=(self.execute, action, event, queue))
        proc.start()

        # Send heartbeat.
        heartbeat_retry = 0
        while not event.is_set():
            event.wait(config.ACTIVITY_HEARTBEAT_INTERVAL)
            try:
                res = self.heartbeat(self.task_token)
                if res['cancelRequested']:
                    proc.terminate()
                    proc.join()
                    return Result('cancelled', -1, '', '', '', -1)
            except Exception as err:
                if heartbeat_retry <= config.ACTIVITY_HEARTBEAT_MAX_RETRY:
                    heartbeat_retry += 1
                    continue
                else:
                    proc.terminate()
                    proc.join()
                    raise

        # Evaluate the result.
        result = queue.get_nowait()
        proc.join()
        return result
Example No. 10
def _get_output_shape(model_fn):
    if K.backend() == 'cntk':
        # Create model in a subprocess so that
        # the memory consumed by InceptionResNetV2 will be
        # released back to the system after this test
        # (to deal with OOM error on CNTK backend).
        # TODO: remove the use of multiprocessing from these tests
        # once a memory clearing mechanism
        # is implemented in the CNTK backend.
        def target(queue):
            model = model_fn()
            queue.put(model.output_shape)
        queue = Queue()
        p = Process(target=target, args=(queue,))
        p.start()
        p.join()
        # The error in a subprocess won't propagate
        # to the main process, so we check if the model
        # is successfully created by checking if the output shape
        # has been put into the queue
        assert not queue.empty(), 'Model creation failed.'
        return queue.get_nowait()
    else:
        model = model_fn()
        return model.output_shape
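
The tests above call it roughly like this (a sketch; applications refers to keras.applications, and functools.partial keeps the model factory picklable for the subprocess):

from functools import partial

model_fn = partial(applications.InceptionResNetV2, weights=None, include_top=False)
shape = _get_output_shape(model_fn)
print(shape)   # e.g. (None, None, None, 1536) with the channels_last data format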
Example No. 11
def f(idx, q, r):
    path = "data%s" % (idx)
    os.makedirs(path)
    while True:
        item = q.get()
        if item.item_type == ITEM_QUIT:
            break

        count = 0
        localQueue = Queue()
        current = item.data
        while True:
            print current
            fo = urlopen(current)
            data = fo.read()
            name = "%s/%s"%(path,count)
            fw = open( name, "w" )
            count = count + 1
            fw.write(data)
            fw.close()
            fo.close()
            p = MyHTMLParser()
            try:
                p.feed(data)
            except:
                pass

            for href in p.hrefs:
                print item.data, ": ", href

            try:
                current = localQueue.get_nowait()
            except:
                break
Example No. 12
class _workerQpushTimer():
    def __init__(self):
        self.syncPeriod = 2
        self.timer = None
        self.Qinit()
    def Qinit(self):
        self.syncTmpQ = Queue()
    # flush remaining items in the queue, then close and join_thread
    def Qflush(self):
        while True:
            try:
                self.syncTmpQ.get(True, comm.FLUSH_TIMEOUT)
            except Empty:
                break
        self.syncTmpQ.close()
        self.syncTmpQ.join_thread()
    def enableTimer(self, workerPool):
        self.timer = Timer(self.syncPeriod, self.pushToWorkerQ, [workerPool])
        self.timer.start()
    def disableTimer(self):
        if self.timer is not None:
            self.timer.cancel()
    # executed periodically to sync items from the main-process queue to each worker queue
    def pushToWorkerQ(self, workerPool):
        while not comm.done.value:
            try:
                item = self.syncTmpQ.get_nowait()
                for w in workerPool:
                    w.queue.put_nowait(item)
            except Empty:
                break
        if not comm.done.value:
            self.enableTimer(workerPool)
Example No. 13
    def Launch(self):
        lastcheck = 0
        # process for checking for levels to be saved
        p = None
        # queue to communicate with the level-saving process
        q = Queue()
        q.put("START", block=True)

        # just keep doing this forever
        while True:
            self.Pump()
            # periodically check for unsaved levels with changes and save them
            if lastcheck < time() - settings.SAVEINTERVAL and not q.empty():
                # check if the running process has sent back a list of levels it has saved
                try:
                    saved = q.get_nowait()
                except Empty:
                    # in rare cases, q.empty() might have returned the wrong answer
                    saved = None

                # in case q.empty() returned the wrong answer
                if saved is not None:
                    # if we actually saved some levels
                    if saved != "START":
                        # write a log about it
                        [self.Log("SAVED %s: %s" % s) for s in saved]
                        # update our last-saved array
                        self.SetSaved([s[1] for s in saved if s[0] == "LEVEL"])
                    # launch a process to save all unsaved levels
                    p = Process(target=SaveData, args=(q, self.levels, self.clientData))
                    p.start()
                    # update the last-checked time
                    lastcheck = time()
            # make sure we don't eat 100% of CPU
            sleep(0.0001)
Example No. 14
class QueueEventsSub:

    def __init__(self, maxsize=0):
        self._maxsize = maxsize
        self._q = Queue(maxsize=maxsize)

    def put_event(self, e):
        if self._q.qsize() == self._maxsize and self._maxsize != 0:
            self._q.get_nowait()

        self._q.put(e)

    def get_event(self):
        if self._q.qsize() == 0:
            return None
        return self._q.get()
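
A small sketch of the drop-oldest behaviour (the sleeps only give the multiprocessing.Queue feeder thread time to settle, since qsize() is approximate):

import time

sub = QueueEventsSub(maxsize=2)
for event in ("a", "b", "c"):
    sub.put_event(event)
    time.sleep(0.05)       # let the queue settle so qsize() is accurate

print(sub.get_event())     # "b"  ("a" was dropped when the queue filled up)
print(sub.get_event())     # "c"
print(sub.get_event())     # None (queue is empty again)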
Example No. 15
class RepeatPool:

    """Implements repeating several times the same function through processes
    and returning the list of results. Takes care of using a different numpy
    random state in each process.
    """

    def __init__(self, target):
        self.n_processes = cpu_count()
        self.target = target

    def work(self, seed):
        np.random.seed(seed)
        result = self.target()
        self.result_queue.put(result)

    def run(self, n):
        self.workers = [None] * self.n_processes
        self.to_go = n  # Number of repetitions still to be run
        self.result_queue = Queue()
        results = []
        while self._still_working():
            self._clean()
            self._fill()
            try:  # Waits for at least one job to push results.
                results.append(self.result_queue.get(True, 1))
            except Empty:  # The timeout prevents blocking if jobs fail to push
                pass
        # Collect remaining results just in case
        results.extend([self.result_queue.get_nowait()
                        for _ in range(n - len(results))])
        return results

    def _still_working(self):
        return self.to_go > 0 or any([w is not None for w in self.workers])

    def _start_or_None(self):
        if self.to_go > 0:
            self.to_go -= 1
            p = Process(target=self.work, args=(np.random.randint(2**32),))
            p.start()
            return p
        else:
            return None

    def _clean(self):
        self.workers = [w if w is None or w.is_alive() else self._clean_job(w)
                        for w in self.workers]

    def _clean_job(self, w):
        if w.exitcode > 0:
            raise RuntimeError("A subprocess has failed.")
        w.join()
        return None

    def _fill(self):
        self.workers = [self._start_or_None()
                        if w is None else w for w in self.workers]
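
A usage sketch (sample_mean is illustrative; bound methods are handed to the child processes, so this assumes the default fork start method on Unix):

import numpy as np

def sample_mean():
    # Each repetition runs in its own process with its own numpy seed.
    return np.random.normal(loc=1.0, scale=0.1, size=100).mean()

pool = RepeatPool(sample_mean)
results = pool.run(8)
print(len(results), np.mean(results))   # 8 independent estimates close to 1.0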
Example No. 16
def test_inceptionresnetv2_variable_input_channels():
    def target(queue, input_shape):
        model = applications.InceptionResNetV2(weights=None, include_top=False, input_shape=input_shape)
        queue.put(model.output_shape)

    queue = Queue()
    p = Process(target=target, args=(queue, (None, None, 1)))
    p.start()
    p.join()
    assert not queue.empty(), 'Model creation failed.'
    model_output_shape = queue.get_nowait()
    assert model_output_shape == (None, None, None, 1536)

    p = Process(target=target, args=(queue, (None, None, 4)))
    p.start()
    p.join()
    assert not queue.empty(), 'Model creation failed.'
    model_output_shape = queue.get_nowait()
    assert model_output_shape == (None, None, None, 1536)
Example No. 17
class SimpleCommandQueue():

    def __init__(self):
        self._q = Queue()

    def put(self, command, *args):
        self._q.put((command, args))

    def get(self):
        return self._q.get_nowait()
Example No. 18
def run_parse(host_name, request):
    '''
    Launch the parser in a separate thread
    '''

    try:
        if host_name not in AVAILABLE_HOSTS:
            # log the invalid request
            raise ValueError("Unavailable Host " + host_name)

        queue = Queue()
        queue.put(request)
        parser = import_parser_by_host(host_name)
        p = Process(target=parser.parse, args=(queue,))
        p.start()
        p.join()
        response = False
        try:
            response = queue.get_nowait()
        finally:
            error_message = "Runtime Parser Error"

            if not response:
                return True, []

            if response.get('response', False) is False:
                raise ParserError(error_message)

            if not response.get('success', False):
                raise ParserError(response.get('response', error_message))

        results = response.get('response')

        flags = response.get('flags', False)
        if flags and type(flags) == type([]):
            if 'no_range' in flags and request['range']:
                try:
                    min_value, max_value = map(float, request['range'].split(','))
                    temp = filter(lambda value: min_value <= value['total'] <= max_value, results)
                except Exception:
                    pass
                else:
                    results = temp

        response = results

    except Exception, e:
        # Module execution error: write it to the log and print it to the console
        error_message = ''
        if DEBUG_LOGGING:
            error_message = '\n' +\
                traceback.format_exc()
        error_message += str(e)
        logger.critical(error_message)
        return False, e
Example No. 19
File: Linker.py Project: Oueee/SOS
class Linker(object):
    instance_count = 0

    def __init__(self):
        self.queue = Queue()

        self.id = Linker.instance_count
        Linker.instance_count += 1

        self.queue_watcher = Process(target=self.reader)
        self.queue_watcher.start()

    def join(self):
        if self.queue_watcher:
            self._send(self.id, Message.Types.end)
            self.queue_watcher.join()

    def _send(self, id_receiver, type_message, data={}):
        message = Message(self.id, type_message, data)
        ILOTS_LIST[id_receiver].put_in_queue(message)

    def reader(self):

        while True:
            try:
                msg = self.queue.get_nowait()
                if msg.type == Message.Types.stat_transmission:
                    data = self.compare(msg.data)
                    self._send(msg.id_sender, Message.Types.answer, data)
                elif msg.type == Message.Types.answer:
                    self.process_answer(msg.data)
                else:
                    break
            except Q.Empty:
                pass

    def put_in_queue(self, stat):
        self.queue.put(stat)

    def send_statistics(self):
        if randint(0, 100) < PERCENTAGE_TRANSMISSION_STATS:
            id_receiver = self.id
            while id_receiver == self.id:
                id_receiver = randint(0, NB_ILOTS)

            self._send(1, Message.Types.stat_transmission, self.get_stats())

    def get_stats(self):
        pass

    def process_answer(self, data):
        pass

    def compare(self, stat):
        pass
Example No. 20
class CalculationQueue:
    """
    This class is a wrapper around a multiprocessing.Queue.

    It can be used to send and receive values from the modules while the calculation is running.
    You can use it to save, for example, the file paths of output files that you create on the fly.
    The added items are all of type CalculationQueueItem.
    The CalculationQueue can be used like a dict: after the termination of the underlying process,
    you can access the entries by the names you gave them when putting them on the queue.
    """

    def __init__(self):
        """
        Create a queue.
        """
        #: The multiprocessing queue to handle
        self.queue = Queue()
        #: The results to be filled in the end
        self.results = dict()

    def put(self, name, item, **kwargs):
        """
        Put an item on the queue with the given name. Please keep that adding two items with the same name
        overrides one of them!
        """
        self.queue.put(CalculationQueueItem(name, item), **kwargs)

    def fill_results(self):
        """
        Fill the internal dict with the information of the queue.
        Do not call this on your own.
        Do only call this when the underlying process has ended.
        """
        while True:
            try:
                result = self.queue.get_nowait()
                self.results.update({result.name: result.item})
            except Empty:
                return

    def get(self, name):
        """
        Return the item with the given name or an Exception when it is not found.
        Do not call this on your own..
        """
        self.fill_results()
        return self.results[name]

    def get_keys(self):
        """
        Return all possible names of items saved in this queue.
        Do not call this on your own.
        """
        self.fill_results()
        return list(self.results.keys())
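
A usage sketch, assuming CalculationQueueItem is the simple name/item container the class expects and that the queue is handed to the worker process as an argument:

from multiprocessing import Process

def worker(calculation_queue):
    # The worker records named results while it runs.
    calculation_queue.put("output_file", "/tmp/result.root")
    calculation_queue.put("n_events", 1000)

calculation_queue = CalculationQueue()
p = Process(target=worker, args=(calculation_queue,))
p.start()
p.join()

print(calculation_queue.get("output_file"))   # "/tmp/result.root"
print(calculation_queue.get_keys())           # ["output_file", "n_events"] (order may vary)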
Example No. 21
class BuildLogMonitor(PulseBuildMonitor):

  def __init__(self, logdir='logs', outputlog='buildlogmonitor.out',
               errorlog='buildlogmonitor.err', es=False, es_servers=None,
               output_dir=None, include_pass=None, **kwargs):
    self.logdir = os.path.abspath(logdir)
    self.logger = None
    self.es = es
    self.es_servers = es_servers
    self.output_dir = output_dir
    self.include_pass = include_pass

    self.job_queue = Queue()

    # setup logging
    if outputlog:
      self.logger = logging.getLogger('BuildLogMonitor')
      self.logger.setLevel(logging.DEBUG)
      loghandler = logging.handlers.RotatingFileHandler(
                   outputlog, maxBytes=1000000, backupCount=3)
      loghandler.setLevel(logging.DEBUG)
      formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
      loghandler.setFormatter(formatter)
      self.logger.addHandler(loghandler)

      if errorlog:
        errhandler = logging.FileHandler(errorlog, 'w')
        errhandler.setLevel(logging.ERROR)
        errhandler.setFormatter(formatter)
        self.logger.addHandler(errhandler)

    self.processes = [self._start_parser_process() for x in range(2)]

    PulseBuildMonitor.__init__(self, logger=self.logger, trees=trees_to_watch, **kwargs)

    signal.signal(signal.SIGTERM, self.sighandler)

  def sighandler(self, signumber, frame):
    # dump any pending items in the queue and then quit
    f = open(os.path.join(os.path.dirname(self.logdir), 'pending_jobs.txt'), 'w')
    filedata = []
    while True:
      try:
        logfile, harnessType = self.job_queue.get_nowait()
        filedata.append([logfile, harnessType])
      except Empty:
        break
      except Exception, inst:
        if self.logger:
          self.logger.exception(inst)
    f.write("%s\n" % json.dumps(filedata))
    f.close()
    os.kill(os.getpid(), signal.SIGKILL)
Example No. 22
class ImageLoader:
    global testing
    OUTPUT_QUEUE_SIZE = 1000
    INPUT_QUEUE_SIZE = 100
    WORKER_COUNT = 3

    def __init__(self):
        self.input_queue = Queue(self.INPUT_QUEUE_SIZE)
        self.output_queue = Queue(self.OUTPUT_QUEUE_SIZE)

    def start(self, folder):
        os.chdir(folder)
        self.pool = multiprocessing.Pool(self.WORKER_COUNT,
                                         worker_main, (
                                             self.input_queue,
                                             self.output_queue,
                                             testing
                                         ))

    def stop(self):
        self.pool.terminate()

    def put(self, filename, w, h):
        if testing:
            with Timer() as t:
                with open(filename, 'rb') as f:
                    iofile = io.BytesIO(f.read())
            opentime = t.msecs
            self.input_queue.put((iofile, filename, w, h))
            return opentime
        if not testing:
            with open(filename, 'rb') as f:
                iofile = io.BytesIO(f.read())
            self.input_queue.put((iofile, filename, w, h))

    def get(self, *p, **kw):
        try:
            imobj = self.output_queue.get_nowait(*p, **kw)
            if not testing:
                image = imobj[1]
                image = Image.frombytes(
                    image['mode'],
                    image['size'],
                    image['pixels'])
                imobj[1] = image
                # print "readahead", self.output_queue.qsize()
            return imobj
        except Empty:
            e = sys.exc_info()[0]
            if e is not Empty:
                print e
            return "none"
Example No. 23
def test_iterate_empty_psml_capture(simple_summary_capture):
    simple_summary_capture.display_filter = "frame.len == 1"
    q = Queue()
    p = Process(target=_iterate_capture_object, args=(simple_summary_capture, q))
    p.start()
    p.join(2)
    try:
        no_hang = q.get_nowait()
    except Empty:
        no_hang = False
    if p.is_alive():
        p.terminate()
    assert no_hang
Example No. 24
class Shared(object):
    """Quick and dirty one-way shared object.
    Blocks on first get, later returns the latest value without blocking.
    Not safe for use by more than one producer and consumer.
    """
    def __init__(self):
        self.value = Queue(1)

    def get(self):
        try:
            self.local_value = self.value.get_nowait()
        except queue.Empty: pass
        try:
            return self.local_value
        except AttributeError:
            self.local_value = self.value.get()
            return self.local_value

    def put(self, local_value):
        try:
            self.value.get_nowait()
        except queue.Empty: pass
        self.value.put(local_value)
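
Illustrative use (the names are hypothetical): the producer keeps replacing the value while the consumer blocks only on its first read.

import time
from multiprocessing import Process

def producer(shared):
    for i in range(3):
        shared.put(i)      # replaces any value still sitting in the slot
        time.sleep(0.2)

shared = Shared()
p = Process(target=producer, args=(shared,))
p.start()

print(shared.get())        # blocks until the first value arrives
time.sleep(1.0)
print(shared.get())        # latest value (likely 2), returned without blocking
p.join()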
Example No. 25
def queryPriceSource(priceSourceClass, sourceId, storagePath, resources, requestsQueue, resultsQueue, exitEvent):
    pricesQueue = MpQueue()
    priceSource = priceSourceClass(pricesQueue, storagePath, resources)
    while True:
        if exitEvent.is_set():
            priceSource.terminate()
            return
        while not requestsQueue.empty():
            jobId, cookie, cardName, setId, language, foilness = requestsQueue.get_nowait()
            priceSource.queryPrice(cardName, setId, language, foilness, (jobId, cookie,))
        while not pricesQueue.empty():
            priceInfo, priceCookie = pricesQueue.get_nowait()
            jobId, cookie = priceCookie
            resultsQueue.put((jobId, sourceId, priceInfo, cookie,))
Example No. 26
class QTestRunner:
    def __init__(self, result):
        self.result = result
        self.done = False
        self.timer = QtCore.QTimer()
        self.timer.connect(self.timer, 
                   QtCore.SIGNAL('timeout()'),
                   self.tick
                   )
        self.timer.setInterval(500)
    
    def run(self, test):
        self.done = False
        self.q = Queue()
        self.result.setAmount(test.countTestCases())
        self.result.enter()
        self.proc = Process(target=self.bgProcess, args=(test, self.q))
        self.proc.start()
        self.timer.start()
    
    def tick(self):
        c = True
        while c:
            try:
                data = self.q.get_nowait()
                key, args = data
                if key == 'done':
                    self.timer.stop()
                    self.done = True
                    c = False
                self.result.translate[key](*args)
            except Empty:
                c = False
    
    @staticmethod
    def bgProcess(suite, q):
        pseudo_file = StringIO.StringIO()
        sys.stdout = sys.stderr = pseudo_file
        result = BGTestResult(q, pseudo_file)
        start = time.time()
        suite(result)
        q.put(('done', [time.time() - start]))
        q.close()
        sys.stdout = sys.__stdout__
        sys.stderr = sys.__stderr__
    
    def stop(self):
        self.timer.stop()
        self.proc.terminate()
Example No. 27
def plmapp(func, args=[], kwargs=[], processes=10, progress_bar=False, default_output=None, individual_timeout=20):
    """ 
    """
    bigger_array = args if len(args) > len(kwargs) else kwargs 
    big_len = len(bigger_array)
    ag = lambda array, index, default : default if index >= len(array) else array[index]

    input_queue = Queue()
    output_queue = Queue()
    error_queue = Queue()


    progress_details = {'completed_tasks' : Value('i', 0), 
                        'total_tasks' : big_len, 
                        'process_lock' : Lock()}

    # Load the input queue with tasks
    for i in xrange(big_len):
        task = (i, func, ag(args, i, ()), ag(kwargs, i, {})) 
        input_queue.put(task)

    # Create the processes (compute the worker count before reusing the name)
    end = min(processes, big_len)
    processes = []
    start = -1 if progress_bar else 0


    for i in xrange(start, end):
        p = Process(target=queue_exec, args=(input_queue, output_queue, i, progress_details, default_output, individual_timeout))
        processes.append(p)
        p.start()

    # Wait for the processes to complete    
    for process in processes:
        process.join() 

    output_errors = [ None for _ in xrange(big_len)]
    output_values = [ None for _ in xrange(big_len)]

    while True:
        try:
            loc, error_value = error_queue.get_nowait()
            output_errors[loc] = error_value
        except:
            break


    return output_errors, output_values
Example No. 28
class SerialManager(Process):
    """ This class has been written by
        Philipp Klaus and can be found on
        https://gist.github.com/4039175 .  """

    def __init__(self, device, **kwargs):
        settings = dict()
        settings['baudrate'] = 115200
        settings['bytesize'] = serial.EIGHTBITS
        settings['parity'] = serial.PARITY_NONE
        settings['stopbits'] = serial.STOPBITS_ONE
        settings['timeout'] = 0.0005
        settings.update(kwargs)
        self._kwargs = settings
        self.ser = serial.Serial(device, **self._kwargs)
        self.in_queue = Queue()
        self.out_queue = Queue()
        self.closing = False # A flag to indicate thread shutdown
        self.read_num_bytes  = 256
        self.sleeptime = None
        self._chunker = None
        Process.__init__(self, target=self.loop)

    def set_chunker(self, chunker):
        self._chunker = chunker
        self.in_queue = chunker.in_queue

    def loop(self):
        try:
            while not self.closing:
                if self.sleeptime: time.sleep(self.sleeptime)
                in_data = self.ser.read(self.read_num_bytes)
                if in_data:
                    if self._chunker:
                        self._chunker.new_data(in_data)
                    else:
                        self.in_queue.put(in_data)
                try:
                    out_buffer = self.out_queue.get_nowait()
                    self.ser.write(out_buffer)
                except Empty:
                    pass
        except (KeyboardInterrupt, SystemExit):
            pass
        self.ser.close()

    def close(self):
        self.closing = True
Example No. 29
class SerialManager(Process):
	""" This class has been written by
		Philipp Klaus and can be found on
		https://gist.github.com/4039175 .  
		
		modified by Shunya
	"""

	def __init__(self, device, kwargs):
		settings = dict()
		settings['baudrate'] = 9600
		settings['bytesize'] = serial.EIGHTBITS
		settings['parity'] = serial.PARITY_NONE
		settings['stopbits'] = serial.STOPBITS_ONE
		settings['timeout'] = 0.0005
		settings.update(kwargs)
		self._kwargs = settings
		self.ser = serial.Serial(device, **self._kwargs)
		self.outgoings = []
		self.out_queue = Queue()
		self.closing = False # A flag to indicate thread shutdown
		self.read_num_bytes  = 256
		self.sleeptime = None
		Process.__init__(self, target=self.loop)

	def loop(self):
		try:
			while not self.closing:
				if self.sleeptime: time.sleep(self.sleeptime)
				in_data = self.ser.read(self.read_num_bytes)
				if in_data:
					#logger.debug(in_data)
					for q in self.outgoings:
						q.put(in_data)
				try:
					out_buffer = self.out_queue.get_nowait()
					self.ser.write(out_buffer)
				except Empty:
					pass
		except (KeyboardInterrupt, SystemExit):
			pass
		self.ser.close()

	def appendOutgoingQueue(self, anotherQueue):
		self.outgoings.append(anotherQueue)

	def close(self):
		self.closing = True
Example No. 30
    def set_from_file(self, varfile_path, args):
        q = Queue()
        p = Process(target=set_from_file, args=(q, varfile_path, args))
        p.start()
        p.join()
        there_are_results = False
        while True:
            try:
                results = q.get_nowait()
                there_are_results = True
                if len(results) == 1:
                    raise DataError(results[0])
                self.set(*results)
            except Empty:
                if not there_are_results:
                    raise DataError('No variables')
                return
Example No. 31
class Tui(PopUpLauncher):
    signals = ['close']

    def __init__(self, controller, style):
        # Shared objects to help event handling.
        self.events = Queue()
        self.lock = Lock()

        self.view = MainWindow(controller)
        self.screen = raw_display.Screen()
        self.screen.set_terminal_properties(256)

        self.loop = MainLoop(widget=self,
                             palette=style,
                             screen=self.screen,
                             unhandled_input=Tui.exit_handler,
                             pop_ups=True)

        self.pipe = self.loop.watch_pipe(self.update_ui)
        self.loop.set_alarm_in(0.1, Tui.update_timer, self.view.logo.timer)
        super(Tui, self).__init__(self.view)

        connect_signal(self.view.issues_table, 'refresh',
                       lambda source: self.loop.draw_screen())
        connect_signal(self.view.stat_table, 'refresh',
                       lambda source: self.loop.draw_screen())

    def update_ui(self, _):
        while True:
            try:
                event = self.events.get_nowait()
                if hasattr(self, event['fn']):
                    getattr(self, event['fn'])(**event['kwargs'])
            except:
                break

    def update_timer(self, timer):
        if timer.update():
            self.set_alarm_in(0.1, Tui.update_timer, timer)

    def new_fuzz_job(self, ident, fuzzer, sut, cost, batch):
        self.view.job_table.add_fuzz_job(ident, fuzzer, sut, cost, batch)

    def new_reduce_job(self, ident, sut, cost, issue_id, size):
        self.view.job_table.add_reduce_job(ident, sut, cost, issue_id, size)

    def new_update_job(self, ident, sut):
        self.view.job_table.add_update_job(ident, sut)

    def remove_job(self, ident):
        self.view.job_table.remove_job(ident)

    def activate_job(self, ident):
        self.view.job_table.activate_job(ident)

    def job_progress(self, ident, progress):
        self.view.job_table.job_progress(ident, progress)

    def update_load(self, load):
        self.view.logo.load.set_completion(load)

    def update_fuzz_stat(self):
        self.view.stat_table.update()

    def new_issue(self, issue):
        issue['sut'] = config_get_name_from_section(issue['sut'])
        # Do a shiny animation when a new issue has been received.
        self.view.logo.do_animate = True
        self.loop.set_alarm_at(time.time() + 5,
                               callback=self.view.logo.stop_animation)
        self.loop.set_alarm_in(0.1, self.view.logo.animate, self.view.logo)
        self.view.issues_table.add_row(issue)

    def update_issue(self, issue):
        self.view.issues_table.update_row(ident=issue['_id'])

    def warning(self, msg):
        self.view._emit('warning', msg)

    @staticmethod
    def exit_handler(key):
        if key in ('q', 'Q', 'f10'):
            raise ExitMainLoop()
Example No. 32
  p.join()
   print("Exited via signal")
  os.kill(os.getpid(), signal.SIGKILL)
  print('parent dead')
'''

if __name__ == '__main__':
    q = Queue()
    q.cancel_join_thread()

    p = Process(target=GUI, args=(q,))
    p.start()

    #signal.signal(signal.SIGINT, lambda signum, frame: shutdown(signum, frame, p))

    print("I'm lit!!!")
    start = time.time()
    while True:
        if not q.empty():
            m = q.get_nowait()
            if m == "stop!":
                break
        # print("I'm still going")
        curr = time.time()
        if curr-start >= 300:
            p.terminate()
            p.join()
            break

    print('done!')
Example No. 33
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from multiprocessing import Queue

q = Queue(3)
q.put("消息1")
q.put("消息2")
q.put("消息3")

#因为消息列队已满下⾯的try都会抛出异常, 第⼀个try会等待2秒后再抛出异常, 第⼆个Try会⽴刻抛出异常
try:
    q.put("消息4", True, 2)
except:
    print("消息列队已满, 现有消息数量:%s" % q.qsize())
try:
    q.put_nowait("消息4")
except:
    print("消息列队已满, 现有消息数量:%s" % q.qsize())

#推荐的⽅式, 先判断消息列队是否已满, 再写⼊
if not q.full():
    q.put_nowait("消息4")

#读取消息时, 先判断消息列队是否为空, 再读取
if not q.empty():
    for i in range(q.qsize()):
        print(q.get_nowait())
Example No. 34
def iteration_queue(target_queue: Queue) -> Iterator:
    while True:
        try:
            yield target_queue.get_nowait()
        except queue.Empty:
            break
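
For example (a sketch), draining everything a finished worker produced:

from multiprocessing import Process, Queue

def worker(q: Queue) -> None:
    for i in range(5):
        q.put(i * i)

q = Queue()
p = Process(target=worker, args=(q,))
p.start()
p.join()                          # the child flushes its queue before exiting

print(list(iteration_queue(q)))   # [0, 1, 4, 9, 16]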
Example No. 35
class TestRunnerManager(threading.Thread):
    init_lock = threading.Lock()

    def __init__(self,
                 suite_name,
                 tests,
                 test_source_cls,
                 browser_cls,
                 browser_kwargs,
                 executor_cls,
                 executor_kwargs,
                 stop_flag,
                 pause_after_test=False,
                 pause_on_unexpected=False,
                 restart_on_unexpected=True,
                 debug_info=None):
        """Thread that owns a single TestRunner process and any processes required
        by the TestRunner (e.g. the Firefox binary).

        TestRunnerManagers are responsible for launching the browser process and the
        runner process, and for logging the test progress. The actual test running
        is done by the TestRunner. In particular they:

        * Start the binary of the program under test
        * Start the TestRunner
        * Tell the TestRunner to start a test, if any
        * Log that the test started
        * Log the test results
        * Take any remedial action required e.g. restart crashed or hung
          processes
        """
        self.suite_name = suite_name

        self.tests = tests
        self.test_source_cls = test_source_cls
        self.test_queue = None

        self.browser_cls = browser_cls
        self.browser_kwargs = browser_kwargs

        self.executor_cls = executor_cls
        self.executor_kwargs = executor_kwargs

        self.test_source = None

        # Flags used to shut down this thread if we get a sigint
        self.parent_stop_flag = stop_flag
        self.child_stop_flag = multiprocessing.Event()

        self.pause_after_test = pause_after_test
        self.pause_on_unexpected = pause_on_unexpected
        self.restart_on_unexpected = restart_on_unexpected
        self.debug_info = debug_info

        self.manager_number = next_manager_number()

        self.command_queue = Queue()
        self.remote_queue = Queue()

        self.test_runner_proc = None

        threading.Thread.__init__(self,
                                  name="Thread-TestrunnerManager-%i" %
                                  self.manager_number)
        # This is started in the actual new thread
        self.logger = None

        self.unexpected_count = 0

        # This may not really be what we want
        self.daemon = True

        self.max_restarts = 5

        self.browser = None

    def run(self):
        """Main loop for the TestManager.

        TestManagers generally receive commands from their
        TestRunner updating them on the status of a test. They
        may also have a stop flag set by the main thread indicating
        that the manager should shut down the next time the event loop
        spins."""
        self.logger = structuredlog.StructuredLogger(self.suite_name)
        with self.browser_cls(
                self.logger,
                **self.browser_kwargs) as browser, self.test_source_cls(
                    self.tests) as test_source:
            self.browser = BrowserManager(self.logger,
                                          browser,
                                          self.command_queue,
                                          no_timeout=self.debug_info
                                          is not None)
            self.test_source = test_source
            dispatch = {
                RunnerManagerState.before_init: self.start_init,
                RunnerManagerState.initalizing: self.init,
                RunnerManagerState.running: self.run_test,
                RunnerManagerState.restarting: self.restart_runner
            }

            self.state = RunnerManagerState.before_init()
            end_states = (RunnerManagerState.stop, RunnerManagerState.error)

            try:
                while not isinstance(self.state, end_states):
                    f = dispatch.get(self.state.__class__)
                    while f:
                        self.logger.debug("Dispatch %s" % f.__name__)
                        if self.should_stop():
                            return
                        new_state = f()
                        if new_state is None:
                            break
                        self.state = new_state
                        self.logger.debug("new state: %s" %
                                          self.state.__class__.__name__)
                        if isinstance(self.state, end_states):
                            return
                        f = dispatch.get(self.state.__class__)

                    new_state = None
                    while new_state is None:
                        new_state = self.wait_event()
                        if self.should_stop():
                            return
                    self.state = new_state
                    self.logger.debug("new state: %s" %
                                      self.state.__class__.__name__)
            except Exception:
                self.logger.error(traceback.format_exc())
                raise
            finally:
                self.logger.debug(
                    "TestRunnerManager main loop terminating, starting cleanup"
                )
                clean = isinstance(self.state, RunnerManagerState.stop)
                self.stop_runner(force=not clean)
                self.teardown()
        self.logger.debug("TestRunnerManager main loop terminated")

    def wait_event(self):
        dispatch = {
            RunnerManagerState.before_init: {},
            RunnerManagerState.initalizing: {
                "init_succeeded": self.init_succeeded,
                "init_failed": self.init_failed,
            },
            RunnerManagerState.running: {
                "test_ended": self.test_ended,
                "wait_finished": self.wait_finished,
            },
            RunnerManagerState.restarting: {},
            RunnerManagerState.error: {},
            RunnerManagerState.stop: {},
            None: {
                "runner_teardown": self.runner_teardown,
                "log": self.log,
                "error": self.error
            }
        }
        try:
            command, data = self.command_queue.get(True, 1)
        except IOError:
            self.logger.error("Got IOError from poll")
            return RunnerManagerState.restarting(0)
        except Empty:
            if (self.debug_info and self.debug_info.interactive
                    and self.browser.started and not self.browser.is_alive()):
                self.logger.debug("Debugger exited")
                return RunnerManagerState.stop()

            if (isinstance(self.state, RunnerManagerState.running)
                    and not self.test_runner_proc.is_alive()):
                if not self.command_queue.empty():
                    # We got a new message so process that
                    return

                # If we got to here the runner presumably shut down
                # unexpectedly
                self.logger.info("Test runner process shut down")

                if self.state.test is not None:
                    # This could happen if the test runner crashed for some other
                    # reason
                    # Need to consider the unlikely case where one test causes the
                    # runner process to repeatedly die
                    self.logger.critical("Last test did not complete")
                    return RunnerManagerState.error()
                self.logger.warning(
                    "More tests found, but runner process died, restarting")
                return RunnerManagerState.restarting(0)
        else:
            f = (dispatch.get(self.state.__class__, {}).get(command)
                 or dispatch.get(None, {}).get(command))
            if not f:
                self.logger.warning("Got command %s in state %s" %
                                    (command, self.state.__class__.__name__))
                return
            return f(*data)

    def should_stop(self):
        return self.child_stop_flag.is_set() or self.parent_stop_flag.is_set()

    def start_init(self):
        test, test_queue = self.get_next_test()
        if test is None:
            return RunnerManagerState.stop()
        else:
            return RunnerManagerState.initalizing(test, test_queue, 0)

    def init(self):
        assert isinstance(self.state, RunnerManagerState.initalizing)
        if self.state.failure_count > self.max_restarts:
            self.logger.error("Max restarts exceeded")
            return RunnerManagerState.error()

        self.browser.update_settings(self.state.test)

        result = self.browser.init()
        if result is Stop:
            return RunnerManagerState.error()
        elif not result:
            return RunnerManagerState.initalizing(self.state.test,
                                                  self.state.test_queue,
                                                  self.state.failure_count + 1)
        else:
            self.start_test_runner()

    def start_test_runner(self):
        # Note that we need to be careful to start the browser before the
        # test runner to ensure that any state set when the browser is started
        # can be passed in to the test runner.
        assert isinstance(self.state, RunnerManagerState.initalizing)
        assert self.command_queue is not None
        assert self.remote_queue is not None
        self.logger.info("Starting runner")
        executor_browser_cls, executor_browser_kwargs = self.browser.browser.executor_browser()

        args = (self.remote_queue, self.command_queue, self.executor_cls,
                self.executor_kwargs, executor_browser_cls,
                executor_browser_kwargs, self.child_stop_flag)
        self.test_runner_proc = Process(target=start_runner,
                                        args=args,
                                        name="Thread-TestRunner-%i" %
                                        self.manager_number)
        self.test_runner_proc.start()
        self.logger.debug("Test runner started")
        # Now we wait for either an init_succeeded event or an init_failed event

    def init_succeeded(self):
        assert isinstance(self.state, RunnerManagerState.initalizing)
        self.browser.after_init()
        return RunnerManagerState.running(self.state.test,
                                          self.state.test_queue)

    def init_failed(self):
        assert isinstance(self.state, RunnerManagerState.initalizing)
        self.browser.after_init()
        self.stop_runner(force=True)
        return RunnerManagerState.initalizing(self.state.test,
                                              self.state.test_queue,
                                              self.state.failure_count + 1)

    def get_next_test(self, test_queue=None):
        test = None
        while test is None:
            if test_queue is None:
                test_queue = self.test_source.get_queue()
                if test_queue is None:
                    self.logger.info("No more tests")
                    return None, None
            try:
                # Need to block here just to allow for contention with other processes
                test = test_queue.get(block=True, timeout=2)
            except Empty:
                if test_queue.empty():
                    test_queue = None
        return test, test_queue

    def run_test(self):
        assert isinstance(self.state, RunnerManagerState.running)
        assert self.state.test is not None

        if self.browser.update_settings(self.state.test):
            self.logger.info("Restarting browser for new test environment")
            return RunnerManagerState.restarting(self.state.test,
                                                 self.state.test_queue)

        self.logger.test_start(self.state.test.id)
        self.send_message("run_test", self.state.test)

    def test_ended(self, test, results):
        """Handle the end of a test.

        Output the result of each subtest, and the result of the overall
        harness to the logs.
        """
        assert isinstance(self.state, RunnerManagerState.running)
        assert test == self.state.test
        # Write the result of each subtest
        file_result, test_results = results
        subtest_unexpected = False
        for result in test_results:
            if test.disabled(result.name):
                continue
            expected = test.expected(result.name)
            is_unexpected = expected != result.status

            if is_unexpected:
                self.unexpected_count += 1
                self.logger.debug("Unexpected count in this thread %i" %
                                  self.unexpected_count)
                subtest_unexpected = True
            self.logger.test_status(test.id,
                                    result.name,
                                    result.status,
                                    message=result.message,
                                    expected=expected,
                                    stack=result.stack)

        # TODO: consider changing result if there is a crash dump file

        # Write the result of the test harness
        expected = test.expected()
        status = file_result.status if file_result.status != "EXTERNAL-TIMEOUT" else "TIMEOUT"
        is_unexpected = expected != status
        if is_unexpected:
            self.unexpected_count += 1
            self.logger.debug("Unexpected count in this thread %i" %
                              self.unexpected_count)
        if status == "CRASH":
            self.browser.log_crash(test.id)

        self.logger.test_end(test.id,
                             status,
                             message=file_result.message,
                             expected=expected,
                             extra=file_result.extra)

        restart_before_next = (test.restart_after or file_result.status
                               in ("CRASH", "EXTERNAL-TIMEOUT")
                               or ((subtest_unexpected or is_unexpected)
                                   and self.restart_on_unexpected))

        if (self.pause_after_test or (self.pause_on_unexpected and
                                      (subtest_unexpected or is_unexpected))):
            self.logger.info("Pausing until the browser exits")
            self.send_message("wait")
        else:
            return self.after_test_end(restart_before_next)

    def wait_finished(self):
        assert isinstance(self.state, RunnerManagerState.running)
        # The browser should be stopped already, but this ensures we do any post-stop
        # processing
        self.logger.debug("Wait finished")

        return self.after_test_end(True)

    def after_test_end(self, restart):
        assert isinstance(self.state, RunnerManagerState.running)
        test, test_queue = self.get_next_test()
        if test is None:
            return RunnerManagerState.stop()
        if test_queue != self.state.test_queue:
            # We are starting a new group of tests, so force a restart
            restart = True
        if restart:
            return RunnerManagerState.restarting(test, test_queue)
        else:
            return RunnerManagerState.running(test, test_queue)

    def restart_runner(self):
        """Stop and restart the TestRunner"""
        assert isinstance(self.state, RunnerManagerState.restarting)
        self.stop_runner()
        return RunnerManagerState.initalizing(self.state.test,
                                              self.state.test_queue, 0)

    def log(self, action, kwargs):
        getattr(self.logger, action)(**kwargs)

    def error(self, message):
        self.logger.error(message)
        self.restart_runner()

    def stop_runner(self, force=False):
        """Stop the TestRunner and the browser binary."""
        if self.test_runner_proc is None:
            return

        if self.test_runner_proc.is_alive():
            self.send_message("stop")
        try:
            self.browser.stop(force=force)
            self.ensure_runner_stopped()
        finally:
            self.cleanup()

    def teardown(self):
        self.logger.debug("teardown in testrunnermanager")
        self.test_runner_proc = None
        self.command_queue.close()
        self.remote_queue.close()
        self.command_queue = None
        self.remote_queue = None

    def ensure_runner_stopped(self):
        self.logger.debug("ensure_runner_stopped")
        if self.test_runner_proc is None:
            return

        self.logger.debug("waiting for runner process to end")
        self.test_runner_proc.join(10)
        self.logger.debug("After join")
        if self.test_runner_proc.is_alive():
            # This might leak a file handle from the queue
            self.logger.warning("Forcibly terminating runner process")
            self.test_runner_proc.terminate()
            self.test_runner_proc.join(10)
        else:
            self.logger.debug("Testrunner exited with code %i" %
                              self.test_runner_proc.exitcode)

    def runner_teardown(self):
        self.ensure_runner_stopped()
        return RunnerManagerState.stop()

    def send_message(self, command, *args):
        self.remote_queue.put((command, args))

    def cleanup(self):
        self.logger.debug("TestManager cleanup")
        if self.browser:
            self.browser.cleanup()
        while True:
            try:
                self.logger.warning(" ".join(
                    map(repr, self.command_queue.get_nowait())))
            except Empty:
                break
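
A minimal, self-contained sketch of the drain pattern used in cleanup() above: empty a multiprocessing.Queue with get_nowait() until Empty is raised. Standard library only; the names and the queued tuple are illustrative.

import time
from multiprocessing import Queue
from queue import Empty

def drain_queue(q):
    """Return whatever was still sitting in the queue, without blocking."""
    leftovers = []
    while True:
        try:
            leftovers.append(q.get_nowait())
        except Empty:
            return leftovers

if __name__ == '__main__':
    q = Queue()
    q.put(('log', {'message': 'unprocessed'}))
    time.sleep(0.1)  # give the queue's feeder thread time to flush the item
    print(drain_queue(q))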
Ejemplo n.º 36
0
class Publisher(Process):
    """
    Publishes asynchronous messages

    For example, from a group channel, you may send instructions to every
    direct channel::

        # get a publisher
        publisher = bus.publish()

        # send instruction to direct channels
        publisher.put(bot.direct_channels, instruction)

    From within a direct channel, you may reflect your state to observers::

        # get a publisher
        publisher = bus.publish()

        # share new state
        publisher.put(bot.id, bit_of_information_here)

    """

    DEFER_DURATION = 0.3  # allow subscribers to connect
    EMPTY_DELAY = 0.005   # time to wait if queue is empty

    def __init__(self, context):
        """
        Publishes asynchronous messages

        :param context: general settings
        :type context: Context

        """
        Process.__init__(self)
        self.daemon = True

        self.context = context

        self.fan = Queue()

        self.socket = None  # allow socket injection for tests

    def run(self):
        """
        Continuously broadcasts messages

        This function loops on items received from the queue and handles
        them one by one in the background.

        Processing should be handled in a separate background process, like
        in the following example::

            publisher = Publisher(context=context)
            publisher.start()

        The recommended way for stopping the process is to change the
        parameter ``general.switch`` in the context. For example::

            engine.set('general.switch', 'off')

        Alternatively, the loop is also broken when a poison pill is pushed
        to the queue. For example::

            publisher.fan.put(None)

        """
        address = self.context.get('bus.address')

        if not self.socket:
            zmq_context = zmq.Context.instance()
            self.socket = zmq_context.socket(zmq.PUB)
            self.socket.linger = 0
            self.socket.bind(address)

        time.sleep(self.DEFER_DURATION)  # allow subscribers to connect

        logging.info(u"Starting publisher")
        logging.debug(u"- publishing at {}".format(address))

        try:
            self.context.set('publisher.counter', 0)
            while self.context.get('general.switch', 'on') == 'on':

                if self.fan.empty():
                    time.sleep(self.EMPTY_DELAY)
                    continue

                try:
                    item = self.fan.get_nowait()
                    if item is None:
                        break

                    self.context.increment('publisher.counter')
                    self.process(item)

                except Exception as feedback:
                    logging.exception(feedback)

        except KeyboardInterrupt:
            pass

        self.socket.close()
        self.socket = None

        logging.info("Publisher has been stopped")

    def process(self, item):
        """
        Processes items received from the queue

        :param item: the item received
        :type item: str

        Note that the item should result from a prior serialization
        of a (channel, message) tuple.
        """
        logging.debug(u"Publishing {}".format(item))
        self.socket.send_string(item)

    def put(self, channels, message):
        """
        Broadcasts a message

        :param channels: one or multiple channels
        :type channels: str or list of str

        :param message: the message to send
        :type message: dict or other json-serializable object

        Example::

            message = { ... }
            publisher.put(bot.id, message)

        This function actually puts the message in a global queue that is
        handled asynchronously. Therefore, when the function returns there is
        no guarantee that the message has been transmitted or received.
        """
        assert channels
        if isinstance(channels, string_types):
            channels = [channels]

        assert message
        text = json.dumps(message)
        for channel in channels:
            item = channel + ' ' + text
            logging.debug(u"Queuing {}".format(item))
            self.fan.put(item)
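
The Publisher above broadcasts each message as a "channel + space + JSON" string over a ZeroMQ PUB socket. A matching subscriber, sketched below under the assumption that pyzmq is available, would connect a SUB socket to the same address, filter on the channel prefix, and split each received string back into channel and payload; the address and channel name here are placeholders.

import json
import zmq

def subscribe(address='tcp://127.0.0.1:5555', channel='my-channel'):
    # Connect a SUB socket to the publisher's bind address
    context = zmq.Context.instance()
    socket = context.socket(zmq.SUB)
    socket.connect(address)
    socket.setsockopt_string(zmq.SUBSCRIBE, channel)  # prefix filter on the channel name
    while True:
        raw = socket.recv_string()        # e.g. 'my-channel {"hello": "world"}'
        _, _, payload = raw.partition(' ')
        message = json.loads(payload)
        print(message)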
Ejemplo n.º 37
0
class MainWindow(QMainWindow):
    def __init__(self):
        super(MainWindow, self).__init__()

        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)

        self.entry_videoThread = None
        self.exit_videoThread = None
        self.recognized_personsThread = None

        self.entry_input_queue = Queue(maxsize=5)
        self.exit_input_queue = Queue(maxsize=5)

        self.recognized_queue = Queue(maxsize=15)

        self.entry_recognitionProcess = None
        self.exit_recognitionProcess = None

        self.display_width = 556  # self.ui.entry_door_image_label.width()
        self.display_height = 529  # self.ui.exit_door_image_label.height()

        self.entry_frames_count = 0
        self.exit_frames_count = 0

        self.add_functions()

        self.start_recognition()

    def add_functions(self):
        """Назначение функций нажатию кнопок."""
        self.ui.restart_recognition_action.triggered.connect(
            self.start_recognition)
        self.ui.set_cams_action.triggered.connect(self.set_cams)

    def set_cams(self):
        """Выбор камер."""
        self.close_threads()

        # Get the list of available cameras
        valid_cams = get_dict_of_valid_cams_id()
        print(valid_cams)
        if not valid_cams:
            self.show_error('Не найдено камер для подключения.')

        # For each detected camera, show a dialog to choose its location
        for id_cam, cap in valid_cams.items():
            ret, cv_img = cap.read()
            if not ret:
                continue
            image = self.convert_cv_qt(cv_img)
            cap.release()
            dialog = DialogSetCam(image, id_cam)
            dialog.exec()

        self.start_recognition()

    def start_recognition(self):
        """Запуск потоков для отображения видеоряда."""
        # Завершение работающих потоков
        self.close_threads()

        self.entry_videoThread = VideoThread(cam_id=CAMS['entry'])
        self.entry_videoThread.change_pixmap_signal.connect(
            self.update_image_entry)
        self.entry_videoThread.error_read_cam_signal.connect(self.show_error)

        self.entry_recognitionProcess = RecognitionProcess(
            name='entry',
            image_queue=self.entry_input_queue,
            output_queue=self.recognized_queue,
        )
        self.recognized_personsThread = RecognisedThread(self.recognized_queue)
        self.recognized_personsThread.change_logged_signal.connect(
            self.update_last_logged)

        if CAMS['entry'] != CAMS['exit']:
            self.exit_videoThread = VideoThread(cam_id=CAMS['exit'])
            self.exit_videoThread.change_pixmap_signal.connect(
                self.update_image_exit)
            self.exit_videoThread.error_read_cam_signal.connect(
                self.show_error)
            self.exit_recognitionProcess = RecognitionProcess(
                name='exit',
                image_queue=self.exit_input_queue,
                output_queue=self.recognized_queue)

        self.entry_videoThread.start()
        self.entry_recognitionProcess.start()
        self.recognized_personsThread.start()
        if self.exit_videoThread:
            self.exit_videoThread.start()
            self.exit_recognitionProcess.start()

    def put_entry_frame(self, frame: np.ndarray) -> None:
        if self.entry_input_queue.full():
            self.entry_input_queue.get_nowait()
        self.entry_input_queue.put(frame)

    def put_exit_frame(self, frame: np.ndarray) -> None:
        if self.exit_input_queue.full():
            self.exit_input_queue.get_nowait()
        self.exit_input_queue.put(frame)

    @pyqtSlot(np.ndarray)
    def update_image_entry(self, cv_img: np.ndarray) -> None:
        """Updates the image_label with a new opencv image"""
        if self.entry_frames_count == FRAMES_COUNT:
            self.put_entry_frame(cv_img)
            self.entry_frames_count = 0

        self.entry_frames_count += 1

        qt_img = self.convert_cv_qt(cv_img)
        self.ui.entry_door_image_label.setPixmap(qt_img)

    @pyqtSlot(np.ndarray)
    def update_image_exit(self, cv_img: np.ndarray) -> None:
        """Updates the image_label with a new opencv image"""
        if self.exit_frames_count == FRAMES_COUNT:
            self.put_exit_frame(cv_img)
            self.exit_frames_count = 0

        self.exit_frames_count += 1

        qt_img = self.convert_cv_qt(cv_img)
        self.ui.exit_door_image_label.setPixmap(qt_img)

    def update_last_logged(self, door: str, name: str, image_path: str,
                           access: bool) -> None:
        """Отображение распознанных лиц на главном окне."""
        qt_img = self.convert_cv_qt(cv2.imread(image_path))
        time = datetime.now().strftime("%H:%M:%S")
        if door == 'entry':
            self.ui.logged_in_image_label.setPixmap(qt_img)
            self.ui.logged_in_name_label.setText(name)
            self.ui.logged_in_time_label.setText(time)
            self.ui.logged_in_access_label.setText(str(access))
        if door == 'exit':
            self.ui.logged_out_image_label.setPixmap(qt_img)
            self.ui.logged_out_name_label.setText(name)
            self.ui.logged_out_time_label.setText(time)

    def convert_cv_qt(self, cv_img: np.ndarray) -> QPixmap:
        """Конвертация OpenCV Image в QPixmap"""
        rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
        h, w, ch = rgb_image.shape
        bytes_per_line = ch * w
        convert_to_Qt_format = QtGui.QImage(rgb_image.data, w, h,
                                            bytes_per_line,
                                            QtGui.QImage.Format_RGB888)
        p = convert_to_Qt_format.scaled(self.display_width,
                                        self.display_height,
                                        Qt.KeepAspectRatio)
        return QPixmap.fromImage(p)

    def show_error(self, text: str) -> None:
        """Вывод ошибки при подключении к камере."""
        QMessageBox.critical(self, "Ошибка ", text, QMessageBox.Ok)

    def close_threads(self) -> None:
        """Завершение выполнения потоков воспроизведения видео и процессов."""
        if self.entry_videoThread:
            self.entry_videoThread.close_thread()
        if self.exit_videoThread:
            self.exit_videoThread.close_thread()
        if self.entry_recognitionProcess:
            self.entry_recognitionProcess.stop_process()
        if self.exit_recognitionProcess:
            self.exit_recognitionProcess.stop_process()

    def closeEvent(self, a0: QtGui.QCloseEvent) -> None:
        super(MainWindow, self).closeEvent(a0)

        # Forcefully terminate all child processes
        me = psutil.Process(os.getpid())
        for child in me.children():
            child.kill()
        self.close_threads()
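
put_entry_frame and put_exit_frame above keep only the freshest frames by discarding the oldest item whenever the bounded queue is full. A standalone sketch of that hand-off pattern (standard library only, illustrative names); note the try/except guard here is an addition, since full()/get_nowait()/put() are not atomic together and a consumer may drain the queue between the calls.

from multiprocessing import Queue
from queue import Empty

def put_latest(frame_queue, frame):
    """Enqueue a frame, dropping the oldest one if the bounded queue is full."""
    if frame_queue.full():
        try:
            frame_queue.get_nowait()   # discard the oldest frame
        except Empty:
            pass                       # a consumer emptied the queue first
    frame_queue.put(frame)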
Ejemplo n.º 38
0
class DjController:
	
	def __init__(self, tracklister):
		self.tracklister = tracklister
		
		self.audio_thread = None
		self.dj_thread = None
		self.playEvent = multiprocessing.Event()
		self.isPlaying = multiprocessing.Value('b', True)
		self.skipFlag = multiprocessing.Value('b', False)
		self.queue = Queue(6)				# A blocking queue used to pass at most N audio fragments between the audio thread and the generation thread
		
		self.currentMasterString = multiprocessing.Manager().Value(ctypes.c_char_p, '')
		
		self.pyaudio = None
		self.stream = None
		
		self.djloop_calculates_crossfade = False
		
		self.save_mix = False
		self.save_dir_idx = 0
		self.save_dir = './mix_{}.mp3'
		self.save_dir_tracklist = './mix.txt'
		self.audio_to_save = None
		self.audio_save_queue = Queue(6)
		self.save_tracklist = []
			
	def play(self, save_mix = False):
								
		self.playEvent.set()
			
		if self.dj_thread is None and self.audio_thread is None:
			self.save_mix = save_mix
			self.save_dir_idx = 0
			self.audio_to_save = []
			self.save_tracklist = []
			
			if self.save_mix:
				Process(target = self._flush_save_audio_buffer, args=(self.audio_save_queue,)).start()
			
			self.dj_thread = Process(target = self._dj_loop, args=(self.isPlaying,))
			self.audio_thread = Process(target = self._audio_play_loop, args=(self.playEvent, self.isPlaying, self.currentMasterString))
			self.isPlaying.value = True
			self.dj_thread.start()
			
			while self.queue.empty():
				# wait until at least one audio fragment is in the queue
				sleep(0.1)
				
			self.audio_thread.start()
			self.audio_thread.join()
			
		elif self.dj_thread is None or self.audio_thread is None:
			raise Exception('dj_thread and audio_thread are not both None!')
	
	def save_audio_to_disk(self, audio, song_title):
		
		self.audio_to_save.extend(audio)
		self.save_tracklist.append(song_title)
		
		if len(self.audio_to_save) > 44100 * 60 * 15: # TODO test
			self.flush_audio_to_queue()
			
	def flush_audio_to_queue(self):
		self.save_dir_idx += 1
		self.audio_save_queue.put((self.save_dir.format(self.save_dir_idx), np.array(self.audio_to_save,dtype='single'), self.save_tracklist))
		self.audio_to_save = []
		self.save_tracklist = []
			
	def _flush_save_audio_buffer(self, queue):
		
		while True:
			filename, audio, tracklist = queue.get()
			if not (filename is None):
				logger.debug('Saving {} to disk'.format(filename))
				writer = MonoWriter(filename=filename, bitrate=320,format='mp3')
				writer(np.array(audio,dtype='single'))
				# Save tracklist
				with open(self.save_dir_tracklist,'a+') as csvfile:
					writer = csv.writer(csvfile)
					for line in tracklist:
						writer.writerow([line])
			else:
				logger.debug('Stopping audio saving thread!')
				return
		
	def skipToNextSegment(self):
		if not self.queue.empty():
			self.skipFlag.value = True
		else:
			self.skipFlag.value = False
			logger.warning('Cannot skip to next segment, no audio in queue!')
			
	def markCurrentMaster(self):
		with open('markfile.csv','a+') as csvfile:
			writer = csv.writer(csvfile)
			writer.writerow([self.currentMasterString.value])
		logger.debug('{:20s} has been marked for manual annotation.'.format(self.currentMasterString.value))
	
	def pause(self):
		if self.audio_thread is None:
			return
		self.playEvent.clear()
		
	def stop(self):
		# If paused, then continue playing (deadlock prevention)
		try:
			self.playEvent.set()
		except Exception as e:
			logger.debug(e)
		# Notify the threads to stop working
		self.isPlaying.value = False
		# Empty the queue so the dj thread can terminate
		while not self.queue.empty():
			self.queue.get_nowait()
		if not self.dj_thread is None:
			self.dj_thread.terminate()
		# Reset the threads
		self.queue = Queue(6)
		self.audio_thread = None
		self.dj_thread = None
		# Reset pyaudio resources
		if not self.stream is None:
			self.stream.stop_stream()
			self.stream.close()
		if not self.pyaudio is None:
			self.pyaudio.terminate()
		self.pyaudio = None
			
	def _audio_play_loop(self, playEvent, isPlaying, currentMasterString):
		
		if self.pyaudio is None:
			# Disable output for a while, because pyaudio prints annoying error messages that are irrelevant but that cannot be suppressed :(
			# http://stackoverflow.com/questions/977840/redirecting-fortran-called-via-f2py-output-in-python/978264#978264
			null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)]
			save = os.dup(1), os.dup(2)
			os.dup2(null_fds[0], 1)
			os.dup2(null_fds[1], 2)
			
			# Open the audio
			self.pyaudio = pyaudio.PyAudio()
			
			# Restore stdout and stderr
			os.dup2(save[0], 1)
			os.dup2(save[1], 2)
			os.close(null_fds[0])
			os.close(null_fds[1])
			
		if self.stream is None:
			self.stream = self.pyaudio.open(format = pyaudio.paFloat32,
						channels=1,
						rate=44100,
						output=True)
						
		while isPlaying.value:
			toPlay, toPlayStr, masterTitle = self.queue.get()
			logger.info(toPlayStr)
			currentMasterString.value = masterTitle
			if toPlay is None:
				logger.debug('Stopping music')
				return
				
			FRAME_LEN = 1024
			last_frame_start_idx = int(len(toPlay)/FRAME_LEN) * FRAME_LEN
			for cur_idx in range(0,last_frame_start_idx+1,FRAME_LEN):
				playEvent.wait()
				if not self.isPlaying.value:
					return
				if self.skipFlag.value:
					self.skipFlag.value = False
					break
				if cur_idx == last_frame_start_idx:
					end_idx = len(toPlay)
				else:
					end_idx = cur_idx + FRAME_LEN
				toPlayNow = toPlay[cur_idx:end_idx]
				if toPlayNow.dtype != 'float32':
					toPlayNow = toPlayNow.astype('float32')
				self.stream.write(toPlayNow, num_frames=len(toPlayNow), exception_on_underflow=False)
			
	def _dj_loop(self, isPlaying):
		
		TEMPO = 175 # Keep tempo fixed for classification of audio in segment evaluation
		samples_per_dbeat = int(44100 * 4 * 60.0 / TEMPO)
		
		# Array with all songs somewhere in queue at the moment (playing or to be played)
		song_titles_in_buffer = []
		# Sorted list of fade in points in samples relative to start of buffer
		tracklist_changes = []
		# The total number of songs hearable right now
		num_songs_playing = 0
		# The idx of the master in the subset of songs that is playing right now
		songs_playing_master = 0
		
		def add_song_to_tracklist(master_song, anchor_sample, next_song, next_fade_type, cue_master_out, fade_in_len, fade_out_len):
			f = master_song.tempo / TEMPO			
			buffer_in_sample = int(f * (44100 * master_song.downbeats[cue_master_out] - anchor_sample))
			buffer_switch_sample = int(f * (44100 * master_song.downbeats[cue_master_out] - anchor_sample) + fade_in_len * samples_per_dbeat)
			buffer_out_sample = int(f * (44100 * master_song.downbeats[cue_master_out] - anchor_sample) + (fade_in_len + fade_out_len) * samples_per_dbeat)
			
			song_titles_in_buffer.append(next_song.title)
			bisect.insort(tracklist_changes, (buffer_in_sample,'in',next_fade_type))		# Marks the moment from which there's one song more
			bisect.insort(tracklist_changes, (buffer_switch_sample,'switch',next_fade_type))# Marks the moment from which there's a switch in master
			bisect.insort(tracklist_changes, (buffer_out_sample,'out',next_fade_type))		# Marks the moment from which there's one song less
			
		def curPlayingString(fade_type_str):
			
			outstr = 'Now playing:\n'
			for i in range(num_songs_playing):
				if i != songs_playing_master:
					outstr += song_titles_in_buffer[i] + '\n'
				else:
					outstr += song_titles_in_buffer[i].upper() + '\n'
			if fade_type_str != '':
				outstr += '['+fade_type_str+']'
			return outstr
			
			
		if self.save_mix:
			self.audio_to_save = []
			self.save_tracklist = []
		
		# Set parameters for the first song
		current_song = self.tracklister.getFirstSong()
		current_song.open()
		current_song.openAudio()
		anchor_sample = 0
		cue_master_in = current_song.segment_indices[0] # Start at least 32 downbeats into the first song, enough time to fill the buffer
		fade_in_len = 16
		prev_fade_type = tracklister.TYPE_CHILL
		logger.debug('FIRST SONG: {}'.format(current_song.title))
		
		cue_master_out, next_fade_type, max_fade_in_len, fade_out_len = tracklister.getMasterQueue(current_song, cue_master_in + fade_in_len, prev_fade_type)
		next_song, cue_next_in, cue_master_out, fade_in_len, semitone_offset = self.tracklister.getBestNextSongAndCrossfade(current_song, cue_master_out, max_fade_in_len, fade_out_len, next_fade_type)		
		song_titles_in_buffer.append(current_song.title)
		add_song_to_tracklist(current_song, anchor_sample, next_song, next_fade_type, cue_master_out, fade_in_len, fade_out_len)
		prev_in_or_out = 'in'
			
		f = current_song.tempo / TEMPO		
		current_audio_start = 0
		current_audio_end = int((current_song.downbeats[cue_master_out] * 44100) + (fade_in_len + fade_out_len + 2)*samples_per_dbeat/f)
		current_audio_stretched = time_stretch_and_pitch_shift(current_song.audio[current_audio_start:current_audio_end], f)
		
		mix_buffer = current_audio_stretched
		mix_buffer_cf_start_sample = int(f * (current_song.downbeats[cue_master_out] * 44100))
		
		while True:
			
			# Cue the audio from the previous event point till the current event point.
			# The "type" of audio (one song added, one song less, or change of master) is determined
			# by the label of the previous event in the audio buffer
			prev_end_sample = 0
			for end_sample, in_or_out, cur_fade_type in tracklist_changes:
				
				if end_sample > mix_buffer_cf_start_sample:
					break	
						
				if prev_in_or_out == 'in':
					num_songs_playing += 1
				elif prev_in_or_out == 'out':
					num_songs_playing -= 1
					songs_playing_master -= 1
					song_titles_in_buffer = song_titles_in_buffer[1:]
				elif prev_in_or_out == 'switch':
					songs_playing_master += 1
				prev_in_or_out = in_or_out
				
				# If its a double drop, then end_sample and prev_end_sample might be the same! Don't queue empty segments..
				if end_sample > prev_end_sample:
					toPlay = mix_buffer[prev_end_sample : end_sample]
					cur_fade_type_str = cur_fade_type if num_songs_playing > 1 else ''
					toPlayTuple = (toPlay,curPlayingString(cur_fade_type_str), song_titles_in_buffer[songs_playing_master])
					# Save the audio if necessary
					if self.save_mix:
						self.save_audio_to_disk(toPlay, current_song.title)
					# Play this audio
					self.queue.put(toPlayTuple, isPlaying.value)	# Block until slot available, unless audio has stopped: this might raise an exception which is caught below
					prev_end_sample = end_sample
					
			tracklist_changes = [(tc[0] - mix_buffer_cf_start_sample, tc[1],tc[2]) for tc in tracklist_changes if tc[0] > mix_buffer_cf_start_sample]	
			mix_buffer = mix_buffer[ mix_buffer_cf_start_sample : ]
			current_song.close()
			
			# Go to next song, and select the song after that
			current_song = next_song
			current_song.open()
			f = current_song.tempo / TEMPO	
			cue_master_in = cue_next_in
			prev_fade_type = next_fade_type
			prev_fade_in_len = fade_in_len
			prev_fade_out_len = fade_out_len
			
			cue_master_out, next_fade_type, max_fade_in_len, fade_out_len = tracklister.getMasterQueue(current_song, cue_master_in + fade_in_len, prev_fade_type)
			next_song, cue_next_in, cue_master_out, fade_in_len, semitone_offset = self.tracklister.getBestNextSongAndCrossfade(current_song, cue_master_out, max_fade_in_len, fade_out_len, next_fade_type)
			anchor_sample = int(44100 * current_song.downbeats[cue_master_in])		
			add_song_to_tracklist(current_song, anchor_sample, next_song, next_fade_type, cue_master_out, fade_in_len, fade_out_len)	
			mix_buffer_cf_start_sample = int(f * (current_song.downbeats[cue_master_out] * 44100 - anchor_sample))
			
			f = current_song.tempo / TEMPO		
			current_song.openAudio()
			current_audio_start = int(current_song.downbeats[cue_master_in] * 44100)
			current_audio_end = int((current_song.downbeats[cue_master_out] * 44100) + (fade_in_len + fade_out_len + 2)*samples_per_dbeat/f) # 2 downbeats margin
			current_audio_stretched = time_stretch_and_pitch_shift(current_song.audio[current_audio_start:current_audio_end], f, semitones=semitone_offset)
			
			# Calculate crossfade between *previous* song and current song
			cf = songtransitions.CrossFade(0, [0], prev_fade_in_len + prev_fade_out_len, prev_fade_in_len, prev_fade_type)
			mix_buffer_deepcpy = np.array(mix_buffer,dtype='single',copy=True)
			mix_buffer = cf.apply(mix_buffer_deepcpy, current_audio_stretched, TEMPO)
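
_flush_save_audio_buffer above stops once it receives a tuple whose filename is None, the classic poison-pill shutdown for a worker process that consumes a queue. A minimal, self-contained sketch of the same idea (standard library only, illustrative names):

from multiprocessing import Process, Queue

def worker(q):
    while True:
        item = q.get()           # blocks until work (or the pill) arrives
        if item is None:         # poison pill: stop the worker cleanly
            return
        print('processing', item)

if __name__ == '__main__':
    q = Queue()
    p = Process(target=worker, args=(q,))
    p.start()
    q.put('chunk-1')
    q.put(None)                  # ask the worker to exit
    p.join()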
Ejemplo n.º 39
0
class MultiProcess:
    def __init__(self, verbose):
        log.info('Initializing Multiprocessing Communication')
        self.verbose = verbose
        self.android = Android()
        self.arduino = Arduino()
        self.pc = PC()
        self.detector = SymbolDetector()

        self.msg_queue = Queue()
        self.img_queue = Queue()

    def start(self):
        try:
            self.android.connect()
            self.arduino.connect()
            self.pc.connect()

            Process(target=self.read_android, args=(self.msg_queue, )).start()
            Process(target=self.read_arduino, args=(self.msg_queue, )).start()
            Process(target=self.read_pc,
                    args=(
                        self.msg_queue,
                        self.img_queue,
                    )).start()

            Process(target=self.write_target, args=(self.msg_queue, )).start()

            log.info('Launching Symbol Detector')
            self.detector.start()

            log.info('Multiprocess Communication Session Started')

            while True:
                if not self.img_queue.empty():
                    msg = self.img_queue.get_nowait()
                    if msg == 'TP':
                        log.info('Detecting for Symbols')
                        frame = self.detector.get_frame()
                        symbol_match = self.detector.detect(frame)
                        if symbol_match is not None:
                            log.info('Symbol Match ID: ' + str(symbol_match))
                            self.pc.write('TC|' + str(symbol_match))
                        else:
                            log.info('No Symbols Detected')
                            self.pc.write('TC|0')
        except KeyboardInterrupt:
            raise

    def end(self):
        log.info('Multiprocess Communication Session Ended')

    def read_android(self, msg_queue):
        while True:
            try:
                msg = self.android.read()
                if msg is not None:
                    if self.verbose:
                        log.info('Read Android: ' + str(msg))
                    if msg in ['w1', 'a', 'd', 'h']:
                        msg_queue.put_nowait(format_for('ARD', msg))
                    else:
                        msg_queue.put_nowait(format_for('PC', msg))

            except Exception as e:
                log.error('Android read failed: ' + str(e))
                self.android.connect()

    def read_arduino(self, msg_queue):
        while True:
            msg = self.arduino.read()
            if msg is not None and msg != "Connected":
                if self.verbose:
                    log.info('Read Arduino: ' + str(msg))
                msg_queue.put_nowait(format_for('PC', msg))

    def read_pc(self, msg_queue, img_queue):
        while True:
            msg = self.pc.read()
            if msg is not None:
                if self.verbose:
                    log.info('Read PC: ' + str(msg['target']) + '; ' +
                             str(msg['payload']))
                if msg['target'] == 'android':
                    msg_queue.put_nowait(format_for('AND', msg['payload']))
                elif msg['target'] == 'arduino':
                    msg_queue.put_nowait(format_for('ARD', msg['payload']))
                elif msg['target'] == 'rpi':
                    img_queue.put_nowait(msg['payload'])
                elif msg['target'] == 'both':
                    msg_queue.put_nowait(
                        format_for('AND', msg['payload']['android']))
                    msg_queue.put_nowait(
                        format_for('ARD', msg['payload']['arduino']))

    def write_target(self, msg_queue):
        while True:
            if not msg_queue.empty():
                msg = msg_queue.get_nowait()
                msg = json.loads(msg)
                payload = msg['payload']

                if msg['target'] == 'PC':
                    if self.verbose:
                        log.info('Write PC:' + str(payload))
                    self.pc.write(payload)

                elif msg['target'] == 'AND':
                    if self.verbose:
                        log.info('Write Android:' + str(payload))
                    self.android.write(payload)

                elif msg['target'] == 'ARD':
                    if self.verbose:
                        log.info('Write Arduino:' + str(payload))
                    self.arduino.write(payload)
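
The routing above relies on a format_for helper that is not part of this snippet. Judging from write_target, which json.loads each queued message and reads its 'target' and 'payload' keys, it presumably wraps both fields into a JSON string; a hypothetical version consistent with that usage might look like the following (the real project may define it differently).

import json

def format_for(target, payload):
    """Hypothetical sketch: wrap a payload in the JSON envelope write_target() expects."""
    return json.dumps({'target': target, 'payload': payload})

# e.g. msg_queue.put_nowait(format_for('ARD', 'w1'))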
Ejemplo n.º 40
0
def led_control(led_queue: mp.Queue, demo_mode: mp.Lock):
    strip = led_strip_from_constants()

    # Initialize the library (must be called once before other functions).
    strip.begin()

    def set_all(strip, color):
        for i in range(strip.numPixels()):
            strip.setPixelColor(i, color)
        strip.show()

    set_all(strip, neopixel.Color(0, 0, 0))

    current = defaultdict(
        lambda: RGB(0, 0, 0)
    )  # TODO: add variable type hints once Raspbian includes Python >=3.6
    target = defaultdict(lambda: RGB(0, 0, 0))
    step = defaultdict(lambda: RGB(0, 0, 0))
    loading_anim_process = mp.Process(target=half_loop, args=(strip, ))
    loading_anim_process.start()
    m = led_queue.get()
    loading_anim_process.terminate()
    led_queue.put(m)  # ugly...

    while True:
        t0 = time()
        try:
            message = led_queue.get_nowait()
        except queue.Empty:
            pass
        else:
            if message == "DEMO":
                print("got demo message")
                demo_mode.acquire()
                print("acquired demo lock")
                run_demo(strip, led_queue)
                demo_mode.release()
                continue
            elif message == "BUTTON":
                # pressed the button too late to stay in demo mode, just ignore
                continue

            elif message is None:
                set_all(strip, neopixel.Color(0, 0, 0))
                sleep(0.5)
                break
            for i in range(strip.numPixels()):
                newtarget = message[i][1] if i in message else (0, 0, 0)
                if target[i] != newtarget:
                    target[i] = newtarget
                    step[i] = tuple((t - c) / (LED_SWITCH_TIME / LED_STEP_TIME)
                                    for t, c in zip(target[i], current[i]))

        for i in range(strip.numPixels()):
            current[i] = tuple(c + s for c, s in zip(current[i], step[i]))
            if any((s < 0 and c < t) or (s > 0 and c > t)
                   for s, c, t in zip(step[i], current[i], target[i])):
                step[i] = (0, 0, 0)
                current[i] = target[i]
            color_int = tuple(int(round(c)) for c in current[i])

            # cut off at low intensity to remove perceived flicker
            # using gamma correction would be better maybe?
            color_int = tuple(0 if c < 10 and s < 0 else c
                              for c, s in zip(color_int, step[i]))
            strip.setPixelColor(i, neopixel.Color(*color_int))

        strip.show()
        t1 = time()
        if t1 - t0 < LED_STEP_TIME:
            sleep(LED_STEP_TIME - (t1 - t0))
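
The fading logic above moves each pixel from its current colour towards the target in fixed increments: the per-channel step is (target - current) / (LED_SWITCH_TIME / LED_STEP_TIME), so a full transition takes roughly LED_SWITCH_TIME seconds when the loop wakes every LED_STEP_TIME seconds. A small sketch of that arithmetic; the two timing constants below are assumed placeholder values, not the project's real settings.

LED_SWITCH_TIME = 1.0    # seconds for a full colour transition (assumed)
LED_STEP_TIME = 0.05     # seconds between loop iterations (assumed)

def fade_step(current, target):
    """Per-channel increment applied on every loop iteration."""
    steps = LED_SWITCH_TIME / LED_STEP_TIME
    return tuple((t - c) / steps for t, c in zip(target, current))

print(fade_step((0, 0, 0), (255, 128, 0)))   # (12.75, 6.4, 0.0)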
Ejemplo n.º 41
0
def run_lfads_queue(queue_name,
                    tensorboard_script_path,
                    task_specs,
                    gpu_list=None,
                    one_task_per_gpu=True,
                    max_tasks_simultaneously=None,
                    ignore_donefile=False):

    WAIT_TIME = 0.2

    if 'TMUX' in os.environ:
        print(
            'Warning: tmux sessions will be nested inside the current session')
        del os.environ['TMUX']

    tasks = build_task_list(task_specs)

    gpu_status = query_gpu_status()

    if gpu_list:
        gpu_status = [gpu_status[i] for i in gpu_list]
    num_gpus = len(gpu_status)

    # compute the number of tasks we can do simultaneously,
    # factoring in the number of GPUs. If num_gpus == max_tasks_simultaneously, the
    # load balancer will implicitly place one task on each gpu
    num_cpus = cpu_count()
    if one_task_per_gpu:
        # setting max_tasks_simultaneously <= num_gpus ensures that no gpu will
        # ever have more than one task due to the scheduling algorithm
        if max_tasks_simultaneously is None:
            max_tasks_simultaneously = num_gpus
        else:
            max_tasks_simultaneously = min(max_tasks_simultaneously, num_gpus)
    elif max_tasks_simultaneously is None:
        max_tasks_simultaneously = num_cpus - 1

    def print_status(x):
        print('Queue: ' + x.rstrip('\n'))

    # is tensorboard running in tmux?
    tensorboard_session_prefix = '{}_tensorboard'.format(queue_name)
    running_tensorboard_sessions = get_list_tmux_sessions_name_starts_with(
        tensorboard_session_prefix)

    if running_tensorboard_sessions:
        # tensorboard already running
        m = re.search(r'port(?P<port>\d+)', running_tensorboard_sessions[0])
        port = m.group('port') if m is not None else None
        print_status(
            'TensorBoard already running on port {} in tmux session {}'.format(
                port, running_tensorboard_sessions[0]))
    else:
        # launch the tensorboard on an open port in a tmux session (if not already open)
        port = get_open_port()
        tensorboard_session = '{}_port{}'.format(tensorboard_session_prefix,
                                                 port)
        print_status(
            'Launching TensorBoard on port {} in tmux session {}'.format(
                port, tensorboard_session))
        launch_tensorboard_in_tmux(tensorboard_session,
                                   tensorboard_script_path, port)

    print_status(
        'Initializing with {} GPUs and {} CPUs, max {} simultaneous tasks'.
        format(len(gpu_status), num_cpus, max_tasks_simultaneously))

    # check for tasks already completed
    if not ignore_donefile:
        for task in tasks:
            task.mark_finished_if_donefile_exists()
            if task.skipped_donefile_exists:
                print('Task {}: skipping, task already completed'.format(
                    task.name))

    # communication queue for each process
    message_queue = Queue(100)

    while not check_all_tasks_complete(tasks):

        # check queue for new messages
        do_status_summary = False
        while message_queue.qsize() > 0:
            try:
                msg = message_queue.get_nowait()

                if type(msg) is TaskStartedMessage:
                    task = tasks[msg.task_index]

                    if msg.pid is not None:
                        # None means the task was already running previously, so don't print anything
                        print(
                            'Task {}: started in tmux session {} on GPU {} with PID {}'
                            .format(task.name, msg.tmux_session,
                                    task.running_on_gpu, msg.pid))

                        # deduct from gpu memory
                        gpu = find_gpu_by_index(gpu_status,
                                                task.running_on_gpu)
                        gpu.memfree -= task.memory_req
                        gpu.incr_num_tasks()

                    sys.stdout.flush()

                elif type(msg) is TaskCompletedMessage:
                    task = tasks[msg.task_index]
                    if msg.success:
                        print('Task {}: completed successfully'.format(
                            task.name))
                    else:
                        task.has_failed = True
                        if len(msg.tail) > 0:
                            print(
                                'Task {}: TERMINATED UNEXPECTEDLY. Final output:'
                                .format(task.name))
                            print(msg.tail)
                        else:
                            print(
                                'Task {}: TERMINATED UNEXPECTEDLY with no output'
                                .format(task.name))

                    task.has_finished = True
                    # return to available gpu memory
                    gpu = find_gpu_by_index(gpu_status, task.running_on_gpu)
                    gpu.memfree += task.memory_req
                    gpu.decr_num_tasks()
                    do_status_summary = True

                    sys.stdout.flush()

                elif type(msg) is TaskExceptionMessage:
                    task = tasks[msg.task_index]
                    task.has_finished = True
                    task.has_failed = True
                    print('Task {}: INTERNAL ERROR. Exception was:'.format(
                        task.name))
                    print(msg.message)
                    do_status_summary = True

                    if task.running_on_gpu is not None:
                        gpu = find_gpu_by_index(gpu_status,
                                                task.running_on_gpu)
                        gpu.memfree += task.memory_req
                        gpu.decr_num_tasks()

                    sys.stdout.flush()

                else:
                    print('Unknown message {}'.format(msg))

            except Empty:
                pass

        # check again since tasks have now been marked complete
        if check_all_tasks_complete(tasks):
            break

        if do_status_summary:
            print_task_status_summary(tasks)

        # only run a certain number of tasks at the same time to avoid inefficient use of the CPUs
        if check_num_tasks_running(tasks) >= max_tasks_simultaneously:
            #print_status('Waiting for free CPU to become available')
            time.sleep(WAIT_TIME)
            continue

        if check_all_tasks_completed_or_running(tasks):
            #print_status('All tasks launched or finished, waiting for last batch to complete')
            time.sleep(WAIT_TIME)
            continue

        # find next task for which there is sufficient GPU memory
        (task_index, gpu_index) = pick_next_task(gpu_status, tasks)

        if task_index is None:
            #print_status('Waiting for GPU memory to become available')
            time.sleep(WAIT_TIME)
            continue

        task = tasks[task_index]
        #print('Task {}: launching on gpu {}'.format(task.name, gpu_index))
        sys.stdout.flush()

        # mark task as running
        task.running_on_gpu = gpu_index

        # launch a process to monitor the task, also receive messages via Queue
        p = Process(target=process_launch_task_in_tmux,
                    args=(message_queue, task, gpu_index, True))
        task.process = p
        p.start()

        time.sleep(WAIT_TIME)

    print_status('All tasks completed.')
    print_task_status_summary(tasks)

    # wait for all the subprocesses to complete, should be quick since all tasks are reported done now
    for task in tasks:
        if task.process is not None:
            task.process.join()

    message_queue.close()

    return tasks
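
pick_next_task is not shown in this snippet; from the loop above it must return a (task_index, gpu_index) pair for a pending task whose memory_req fits on some GPU, or (None, None) when nothing fits. A hypothetical greedy version consistent with that contract is sketched below; the task and GPU attribute names (has_finished, running_on_gpu, memory_req, memfree, index) are taken from their use above, and the real implementation may balance load differently.

def pick_next_task(gpu_status, tasks):
    """Hypothetical sketch: first pending task whose memory fits on some GPU."""
    for task_index, task in enumerate(tasks):
        if task.has_finished or task.running_on_gpu is not None:
            continue                     # already done or already scheduled
        for gpu in gpu_status:
            if gpu.memfree >= task.memory_req:
                return task_index, gpu.index
    return None, None                    # nothing runnable right now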
Ejemplo n.º 42
0
class CGMImageConverter(Thread):
    """ Uses CoolUtils CADConverterX activex component to convert CGM images to
    more popular image formats.
    """
    def __init__(self,
                 input_directory: str,
                 output_directory: str,
                 worker_count: int = 3,
                 skip_existing: bool = True,
                 output_extension: str = 'svg'):
        """ Create a new CGM image converter thread.

        Notes:
         * Files not ending with .cgm will be ignored
         * The input_directory is NOT processed recursively

        :param input_directory: Directory that contains CGM images that should
        be converted.
        :param output_directory: Where converted files will be saved.
        :param worker_count: How many worker processes should be spawned
        simultaneously to speed up conversion.
        :param skip_existing: Skip conversion of files found in
        output_directory.
        :param output_extension: Format of the output files (e.g. svg or png).
        """
        super().__init__()
        self._running = True

        self._input_directory = input_directory
        self._output_directory = output_directory
        self._worker_count = worker_count
        self._output_extension = output_extension

        self._workers = []  # type: List[Process]
        self._image_queue_priority = Queue()
        self._image_queue = Queue()
        self._images = {}  # type: Dict[str, str]
        self._total_image_count = 0

        for file in glob.glob(os.path.join(input_directory, '*.cgm')):
            if skip_existing:
                name, _ = os.path.splitext(os.path.basename(file))
                o = os.path.join(output_directory,
                                 '{}.{}'.format(name, output_extension))

                if os.path.isfile(o):
                    _log.debug('Skipping {}'.format(file))
                    continue

            _log.debug('Adding {} to queue'.format(file))
            self._add_item(file)
            self._total_image_count += 1

    def prioritize(self, filename: str):
        """ Prioritize the conversion of an image.

        :param filename: Name of the image to prioritize.
        """
        # TODO Not deleting job from self._image_queue
        name, _ = os.path.splitext(os.path.basename(filename))
        file = self._images[name]
        self._image_queue_priority.put(file)

    def graceful_stop(self):
        """ Stop the thread and its sub-processes gracefully.
        """
        self._running = False

    def get_progress(self):
        return 1 - (self._image_queue.qsize() / self._total_image_count)

    def is_done(self):
        return self._image_queue.empty()

    def run(self):
        while self._running:
            self._workers = [w for w in self._workers if w.is_alive()]

            for _ in range(self._worker_count - len(self._workers)):
                self._dispatch_worker()
            time.sleep(1)

        _log.debug('Stopping thread')
        for _ in range(self._worker_count):
            self._image_queue_priority.put(None)
            print('put')

        _log.debug('Emptying queue')
        # The program will (apparently) not exit if the queue is not empty
        try:
            while True:
                self._image_queue.get_nowait()
        except QueueEmpty:
            pass

        _log.debug('Waiting for workers')
        for worker in self._workers:
            worker.join()

    def _add_item(self, filename: str):
        """ Add a new file that should be converted.

        :param filename: File that should be converted.
        """
        name, _ = os.path.splitext(os.path.basename(filename))

        self._image_queue.put(filename)
        self._images[name] = filename

    def _dispatch_worker(self):
        """ Create a new worker process and start it immediately.
        """
        _log.debug('Starting new worker process')
        worker = Process(target=_coolutils_worker,
                         args=(self._output_directory, self._output_extension,
                               self._image_queue_priority, self._image_queue))
        self._workers.append(worker)
        worker.start()

        # Lower the worker's priority; its pid only exists after start()
        p = psutil.Process(worker.pid)
        p.nice(psutil.BELOW_NORMAL_PRIORITY_CLASS)
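
_coolutils_worker itself is not included in this snippet, but given the two queues passed to it, its consumption loop presumably prefers the priority queue and falls back to the normal one, stopping on the None pills that run() enqueues. A hypothetical sketch of that queue handling follows; the actual CoolUtils ActiveX conversion call is omitted and the print stands in for it.

from multiprocessing import Queue
from queue import Empty

def _coolutils_worker(output_directory, output_extension,
                      priority_queue, normal_queue):
    """Hypothetical sketch: prioritized images first, then the normal queue."""
    while True:
        try:
            filename = priority_queue.get_nowait()   # prioritized images first
        except Empty:
            try:
                filename = normal_queue.get(timeout=1)
            except Empty:
                continue
        if filename is None:                          # poison pill from run()
            return
        print('would convert', filename, 'into', output_directory,
              'as .' + output_extension)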
Ejemplo n.º 43
0
class DataLoader(object):
    """
    A custom dataloader to preprocessing observations (including images)
    and feed them to the network.

    Original code for the dataloader from https://github.com/araffin/robotics-rl-srl
    (MIT licence)
    Authors: Antonin Raffin, René Traoré, Ashley Hill

    :param indices: ([int]) list of observations indices
    :param observations: (np.ndarray) observations or images path
    :param actions: (np.ndarray) actions
    :param batch_size: (int) Number of samples per minibatch
    :param n_workers: (int) number of preprocessing workers (for loading the images)
    :param infinite_loop: (bool) whether to have an iterator that can be reset
    :param max_queue_len: (int) Max number of minibatches that can be preprocessed at the same time
    :param shuffle: (bool) Shuffle the minibatch after each epoch
    :param start_process: (bool) Start the preprocessing process (default: True)
    :param backend: (str) joblib backend (one of 'multiprocessing', 'sequential', 'threading'
        or 'loky' in newest versions)
    :param sequential: (bool) Do not use subprocess to preprocess the data
        (slower but uses less memory for the CI)
    :param partial_minibatch: (bool) Allow partial minibatches (minibatches with a number of elements
        smaller than the batch_size)
    """

    def __init__(self, indices, observations, actions, batch_size, vf_vals=None, n_workers=1,
                 infinite_loop=True, max_queue_len=1, shuffle=False,
                 start_process=True, backend='threading', sequential=False, partial_minibatch=True):
        super(DataLoader, self).__init__()
        self.n_workers = n_workers
        self.infinite_loop = infinite_loop
        self.indices = indices
        self.original_indices = indices.copy()
        self.n_minibatches = len(indices) // batch_size
        # Add a partial minibatch, for instance
        # when there are not enough samples
        if partial_minibatch and len(indices) % batch_size > 0:
            self.n_minibatches += 1
        self.batch_size = batch_size
        self.observations = observations
        self.actions = actions
        self.shuffle = shuffle
        self.queue = Queue(max_queue_len)
        self.process = None
        self.load_images = isinstance(observations[0], str)
        self.backend = backend
        self.sequential = sequential
        self.start_idx = 0
        self.vf_vals = vf_vals
        if start_process:
            self.start_process()

    def start_process(self):
        """Start preprocessing process"""
        # Skip if in sequential mode
        if self.sequential:
            return
        self.process = Process(target=self._run)
        # Make it a daemon, so it will be terminated at the same time
        # as the main process
        self.process.daemon = True
        self.process.start()

    @property
    def _minibatch_indices(self):
        """
        Current minibatch indices given the current pointer
        (start_idx) and the minibatch size
        :return: (np.ndarray) 1D array of indices
        """
        return self.indices[self.start_idx:self.start_idx + self.batch_size]

    # TODO: Fix reward return for value function pretraining
    # Have to sum up future rewards
    # process entire array at start is probably best idea
    # cumulative sum up until each episode start
    # what to do at episode start?
    #

    def sequential_next(self):
        """
        Sequential version of the pre-processing.
        """
        if self.start_idx > len(self.indices):
            raise StopIteration

        if self.start_idx == 0:
            if self.shuffle:
                # Shuffle indices
                np.random.shuffle(self.indices)

        obs = self.observations[self._minibatch_indices]
        if self.load_images:
            obs = np.concatenate([self._make_batch_element(image_path) for image_path in obs],
                                 axis=0)

        actions = self.actions[self._minibatch_indices]
        if self.vf_vals is not None:
            vf_vals = [self.vf_vals[ind] if ind in self.vf_vals else np.nan for ind in self._minibatch_indices]

        self.start_idx += self.batch_size
        if self.vf_vals is not None:
            return obs, actions, vf_vals
        else:
            return obs, actions

    def _run(self):
        start = True
        with Parallel(n_jobs=self.n_workers, batch_size="auto", backend=self.backend) as parallel:
            while start or self.infinite_loop:
                start = False

                if self.shuffle:
                    np.random.shuffle(self.indices)

                for minibatch_idx in range(self.n_minibatches):

                    self.start_idx = minibatch_idx * self.batch_size

                    obs = self.observations[self._minibatch_indices]
                    if self.load_images:
                        if self.n_workers <= 1:
                            obs = [self._make_batch_element(image_path)
                                   for image_path in obs]

                        else:
                            obs = parallel(delayed(self._make_batch_element)(image_path)
                                           for image_path in obs)

                        obs = np.concatenate(obs, axis=0)

                    actions = self.actions[self._minibatch_indices]
                    if self.vf_vals is not None:
                        vf_vals = [self.vf_vals[ind] if ind in self.vf_vals else np.nan for ind in self._minibatch_indices]

                        self.queue.put((obs, actions, vf_vals))
                    else:
                        self.queue.put((obs, actions))

                    # Free memory
                    del obs

                self.queue.put(None)

    @classmethod
    def _make_batch_element(cls, image_path):
        """
        Process one element.

        :param image_path: (str) path to an image
        :return: (np.ndarray)
        """
        # cv2.IMREAD_UNCHANGED is needed to load
        # grayscale and RGBA images
        image = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)
        if image is None:
            raise ValueError("Tried to load {}, but it was not found".format(image_path))
        # Grayscale image
        if len(image.shape) == 2:
            image = image[:, :, np.newaxis]
        # Convert from BGR to RGB
        if image.shape[-1] == 3:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = image.reshape((1,) + image.shape)
        return image

    def __len__(self):
        return self.n_minibatches

    def __iter__(self):
        self.start_idx = 0
        self.indices = self.original_indices.copy()
        return self

    def __next__(self):
        if self.sequential:
            return self.sequential_next()

        if self.process is None:
            raise ValueError("You must call .start_process() before using the dataloader")
        while True:
            try:
                val = self.queue.get_nowait()
                break
            except queue.Empty:
                time.sleep(0.001)
                continue
        if val is None:
            raise StopIteration
        return val

    def __del__(self):
        if self.process is not None:
            self.process.terminate()
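
Because the class implements __iter__/__next__ (and the sequential path raises StopIteration once the indices are exhausted), it can be consumed with a plain for loop. A minimal usage sketch with in-memory observations and no image loading; the shapes and values are illustrative only.

import numpy as np

observations = np.random.randn(10, 4).astype(np.float32)
actions = np.random.randint(0, 2, size=(10, 1))
indices = np.arange(10)

loader = DataLoader(indices, observations, actions, batch_size=4,
                    sequential=True, start_process=False, shuffle=True)

for obs_batch, act_batch in loader:
    print(obs_batch.shape, act_batch.shape)   # (4, 4) (4, 1), final partial batch (2, ...)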
Ejemplo n.º 44
0
class AsyncVideoWriter:
    """Video Writer"""

    def __init__(self, config, finished=None):
        self.config = config
        self.fourcc = cv2.VideoWriter_fourcc(*self.config.store_codec)
        self.video_out = None
        self.stop_time = None
        self.stopped = True
        self.activity_count = 0
        self.is_writing = False
        self.filename = None
        self.frame_queue = Queue(1024)
        self.writer_thread = None
        self.finished = finished

    def start(self, filename):
        if not self.is_writing:
            self.stopped = False
            self.is_writing = True
            self.filename = filename
            self.writer_thread = Thread(target=self._writer_thread, args=())
            self.writer_thread.start()
            print("[INFO] start recording {} {:.2f} s".format(self.filename, time.perf_counter()))
            return True
        return False

    def stop(self, activity_count):
        if not self.stopped:
            self.stop_time = time.perf_counter()
            self.stopped = True
            self.activity_count = activity_count
            # never block the main processing loop
            # if self.writer_thread is not None:
            #    self.writer_thread.join()
            print("[INFO] stop recording {} {:.2f} s, activity: {}"
                  .format(self.filename, self.stop_time, activity_count))

    def write(self, frame):
        if not self.stopped:
            if not self.frame_queue.full():
                self.frame_queue.put(frame)
            else:
                print("[WARNING] Writer queue is full, system will clean up the whole queue. It will result in frame "
                      "drops.")
                self._clean_queue()

    def _writer_thread(self):
        video_out = cv2.VideoWriter(self.filename, self.fourcc, self.config.frame_rate,
                                    (self.config.resolution[0], self.config.resolution[1]))
        empty = False
        while not self.stopped or not empty:
            try:
                frame = self.frame_queue.get(block=True, timeout=1)
            except Empty:
                # print("Queue empty")
                empty = True
                continue
            video_out.write(frame)

        finished_time = time.perf_counter()
        print("[INFO] encoding finished {} {:.2f} s, time lag {:.2f} s"
              .format(self.filename, finished_time, finished_time-self.stop_time))
        video_out.release()
        if self.activity_count < self.config.store_activity_count_threshold:
            print("[INFO] remove recording {} due to less activity {}".format(self.filename, self.activity_count))
            os.remove(self.filename)
        else:
            if self.finished is not None:
                self.finished(self.filename)
        self.is_writing = False

    def _clean_queue(self):
        try:
            while True:
                self.frame_queue.get_nowait()
        except Empty:
            pass
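
A usage sketch for AsyncVideoWriter. The config fields mirror the attributes the class reads (store_codec, frame_rate, resolution, store_activity_count_threshold); their values here are assumptions, as are the filename and the dummy black frames.

import time
from types import SimpleNamespace
import numpy as np

config = SimpleNamespace(store_codec='mp4v', frame_rate=25,
                         resolution=(640, 480),
                         store_activity_count_threshold=0)

writer = AsyncVideoWriter(config, finished=lambda name: print('saved', name))
writer.start('clip.mp4')
for _ in range(50):
    writer.write(np.zeros((480, 640, 3), dtype=np.uint8))   # dummy black frames
writer.stop(activity_count=10)
time.sleep(2)   # give the writer thread time to drain the queue and release the file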
Ejemplo n.º 45
0
class StrategyEngine(object):
    '''Strategy engine'''
    def __init__(self, logger, eg2uiQueue, ui2egQueue):
        self.logger = logger
        
        # Engine->Ui: funds, equity, etc.
        self._eg2uiQueue = eg2uiQueue
        # Ui->Engine: strategy loading, etc.
        self._ui2egQueue = ui2egQueue
        
    def _initialize(self):
        '''Initialization performed inside the engine process'''
        self.logger.info('Initialize strategy engine!')
        
        # Data model
        self._dataModel = DataModel(self.logger)
        self._qteModel = self._dataModel.getQuoteModel()
        self._hisModel = self._dataModel.getHisQuoteModel()
        self._trdModel = self._dataModel.getTradeModel()
        
        # API callback functions
        self._regApiCallback()
        # Handlers for messages sent by strategies
        self._regMainWorkFunc()
        
        # Api->Engine: instruments, quotes, K-lines, trades, etc.
        self._api2egQueue = queue.Queue()
        # Strategy->Engine: initialization, quotes, K-lines, trades, etc.
        self._st2egQueue = Queue()
        # Create the main processing thread that handles data from the API and strategy processes
        self._startMainThread()
        # Create the _pyApi object
        self._pyApi = PyAPI(self.logger, self._api2egQueue) 
        
        # Strategy id, auto-incremented
        self._maxStrategyId = 1
        # Create the strategy manager
        self._strategyMgr = StartegyManager(self.logger, self._st2egQueue)
        
        # Queues to the strategy processes
        self._eg2stQueueDict = {} #{strategy_id, queue}
        
        # Real-time quote subscription list
        self._contStrategyDict = {} #{'contractNo' : [strategyId1, strategyId2...]}
        # Historical K-line subscription list
        self._hisContStrategyDict = {} #{'contractNo' : [strategyId1, strategyId2...]}
        
        self.logger.debug('Initialize strategy engine ok!')
        
    def _regApiCallback(self):
        self._apiCallbackDict = {
            EEQU_SRVEVENT_CONNECT           : self._onApiConnect               ,
            EEQU_SRVEVENT_DISCONNECT        : self._onApiDisconnect            ,
            EEQU_SRVEVENT_EXCHANGE          : self._onApiExchange              ,
            EEQU_SRVEVENT_COMMODITY         : self._onApiCommodity             ,
            EEQU_SRVEVENT_CONTRACT          : self._onApiContract              ,
            EEQU_SRVEVENT_TIMEBUCKET        : self._onApiTimeBucket            ,
            EEQU_SRVEVENT_QUOTESNAP         : self._onApiSnapshot              ,
            EEQU_SRVEVENT_QUOTESNAPLV2      : self._onApiDepthQuote            ,
            EEQU_SRVEVENT_HISQUOTEDATA      : self._onApiKlinedataRsp          ,
            EEQU_SRVEVENT_HISQUOTENOTICE    : self._onApiKlinedataNotice       ,
            EEQU_SRVEVENT_TRADE_LOGINQRY    : self._onApiLoginInfo             ,
            EEQU_SRVEVENT_TRADE_USERQRY     : self._onApiUserInfo              ,
            EEQU_SRVEVENT_TRADE_LOGINNOTICE : self._onApiLoginInfo             ,
            EEQU_SRVEVENT_TRADE_ORDERQRY    : self._onApiOrderDataQry          ,
            EEQU_SRVEVENT_TRADE_ORDER       : self._onApiOrderData             ,
            EEQU_SRVEVENT_TRADE_MATCHQRY    : self._onApiMatchDataQry           ,
            EEQU_SRVEVENT_TRADE_MATCH       : self._onApiMatchData             ,
            EEQU_SRVEVENT_TRADE_POSITQRY    : self._onApiPosDataQry            ,
            EEQU_SRVEVENT_TRADE_POSITION    : self._onApiPosData               ,
            EEQU_SRVEVENT_TRADE_FUNDQRY     : self._onApiMoney                 ,
            EV_EG2ST_ACTUAL_ORDER_SESSION_MAP : self._onOrderSessionMap,
        }
        
    def _regMainWorkFunc(self):
        self._mainWorkFuncDict = {
            EV_ST2EG_EXCHANGE_REQ           : self._onExchange                 ,
            EV_ST2EG_COMMODITY_REQ          : self._reqCommodity               ,
            EV_ST2EG_SUB_QUOTE              : self._reqSubQuote                ,
            EV_ST2EG_UNSUB_QUOTE            : self._reqUnsubQuote              ,
            EV_ST2EG_SUB_HISQUOTE           : self._reqSubHisquote             ,
            EV_ST2EG_UNSUB_HISQUOTE         : self._reqUnsubHisquote           ,
            EV_ST2EG_SWITCH_STRATEGY        : self._reqKLineStrategySwitch     ,
            #
            EV_ST2EG_NOTICE_KLINEDATA       : self._sendKLineData,
            EV_ST2EG_UPDATE_KLINEDATA       : self._sendKLineData,

            # k line series
            EV_ST2EG_ADD_KLINESERIES        : self._addSeries,
            EV_ST2EG_NOTICE_KLINESERIES     : self._sendKLineSeries,
            EV_ST2EG_UPDATE_KLINESERIES     : self._sendKLineSeries,

            # k line signal
            EV_ST2EG_ADD_KLINESIGNAL        : self._addSignal,
            EV_ST2EG_NOTICE_KLINESIGNAL     : self._sendKLineSignal,
            EV_ST2EG_UPDATE_KLINESIGNAL     : self._sendKLineSignal,

            # Pause, resume, and exit
            EV_UI2EG_STRATEGY_PAUSE         : self._onStrategyPause,
            EV_UI2EG_STRATEGY_RESUME        : self._onStrategyResume,
            EV_UI2EG_EQUANT_EXIT            : self._onEquantExit,

            EV_ST2EG_UPDATE_STRATEGYDATA    : self._reqStrategyDataUpdateNotice,
            EV_EG2UI_REPORT_RESPONSE        : self._reportResponse,
            EV_EG2UI_CHECK_RESULT           : self._checkResponse,
            EV_EG2ST_MONITOR_INFO           : self._monitorResponse,

            # load strategy
            EV_EG2UI_LOADSTRATEGY_RESPONSE  : self._loadStrategyResponse,
            EV_EG2UI_STRATEGY_STATUS        : self._starategyStatus,

            EV_ST2EG_STRATEGYTRADEINFO      : self._reqTradeInfo,
            EV_ST2EG_ACTUAL_ORDER           : self._sendOrder,
            EV_ST2EG_ACTUAL_CANCEL_ORDER    : self._deleteOrder,
            EV_ST2EG_ACTUAL_MODIFY_ORDER    : self._modifyOrder,
        }
            
    def run(self):
        # Initialize in the current process
        self._initialize()
        
        while True:
            self._handleUIData()
            
    def _sendEvent2Strategy(self, strategyId, event):
        if strategyId not in self._eg2stQueueDict:
            return
        eg2stQueue = self._eg2stQueueDict[strategyId]
        eg2stQueue.put(event)
        
    def _sendEvent2AllStrategy(self, event):
        for id in self._eg2stQueueDict:
            self._eg2stQueueDict[id].put(event)
        
    def _dispathQuote2Strategy(self, code, apiEvent):
        '''Dispatch real-time quotes to subscribed strategies'''
        apiData = apiEvent.getData()
        contractNo = apiEvent.getContractNo()
        contStList = self._contStrategyDict[contractNo]
        
        data = apiData[:]
        
        msg = {
            'EventSrc'     :  EEQU_EVSRC_ENGINE,
            'EventCode'    :  code,
            'StrategyId'   :  0,
            'SessionId'    :  0,
            'UserNo'       :  '',
            'ContractNo'   :  contractNo,
            'Data'         :  data
        }
        
        event = Event(msg)
        
        for id in contStList:
            self._sendEvent2Strategy(id, event)
            
    # ////////////////// UI event handlers //////////////////
    def _handleUIData(self):
        try:
            event = self._ui2egQueue.get()
            if type(event) is dict:
                event = Event(event)
            code  = event.getEventCode()
            if code == EV_UI2EG_LOADSTRATEGY:
                # Strategy load event
                self._loadStrategy(event)
            elif code == EV_UI2EG_REPORT:
                self._noticeStrategyReport(event)
        except queue.Empty as e:
            pass

    #
    def _noticeStrategyReport(self, event):
        self._strategyMgr.sendEvent2Strategy(event.getStrategyId(), event)

    def _getStrategyId(self):
        id = self._maxStrategyId
        self._maxStrategyId += 1
        return id

    def _loadStrategy(self, event):
        id = self._getStrategyId()
        eg2stQueue = Queue(2000)
        self._eg2stQueueDict[id] = eg2stQueue
        self._strategyMgr.create(id, eg2stQueue, event)

        # =================
        self._strategyMgr.sendEvent2Strategy(id, event)

    def _loadStrategyResponse(self, event):
        self._eg2uiQueue.put(event)
        
    def _starategyStatus(self, event):
        self._eg2uiQueue.put(event)
        
    # //////////////// API callback and strategy request handling //////////////////
    def _handleApiData(self):
        try:
            apiEvent = self._api2egQueue.get_nowait()
            code  = apiEvent.getEventCode()
            # print("c api code =", code)
            if code not in self._apiCallbackDict:
                return
            self._apiCallbackDict[code](apiEvent)
        except queue.Empty as e:
            pass
            
    def _handelStData(self):
        try:
            event = self._st2egQueue.get_nowait()
        except queue.Empty:
            return

        code = event.getEventCode()
        if code not in self._mainWorkFuncDict:
            self.logger.debug('Event %d not registered in _mainWorkFuncDict' % code)
            # print("unhandled event code =", code)
            return
        try:
            self._mainWorkFuncDict[code](event)
        except KeyError:
            # A handler raised KeyError; dump the event to help debugging
            event.printTool()
            print(" now code is ", code)
            
    def _mainThreadFunc(self):
        while True:
            self._handleApiData()
            self._handelStData()
            #time.sleep(0.01)
            
    def _startMainThread(self):
        '''Receive data from the API queue and the strategy queue'''
        self._apiThreadH = Thread(target=self._mainThreadFunc)
        self._apiThreadH.start()
        
    def _moneyThreadFunc(self):
        while True:
            eventList = self._trdModel.getMoneyEvent()
            # Query funds for all accounts
            for event in eventList:
                self._reqMoney(event)
                
            time.sleep(60)
                
    def _createMoneyTimer(self):
        '''Funds polling thread'''
        self._moneyThreadH = Thread(target=self._moneyThreadFunc)
        self._moneyThreadH.start()
        
    # //////////////// API callback events //////////////////
    def _onApiConnect(self, apiEvent):
        self._pyApi.reqExchange(Event({'StrategyId':0, 'Data':''}))
        
    def _onApiDisconnect(self, apiEvent):
        '''
        Disconnect event: distinguish among 9.5 / trading / real-time quotes / historical quotes
            1. Disconnected from 9.5:
                a. Stop all strategies (backtest and live)
                b. Notify the UI of the disconnected state
                c. Set the engine state to "disconnected from 9.5"
                d. Clear all data and reset data state
            2. Disconnected from real-time quotes:
                a. Stop all strategies (live)
                b. Notify the UI of the disconnected state
                c. Set the engine state to "disconnected from real-time quotes"
                d. Clear all real-time quote data

            3. Disconnected from historical quotes:
                a. Stop all strategies (backtest and live)
                b. Notify the UI of the disconnected state
                c. Set the engine state to "disconnected from historical quotes"
                d. Clear all historical K-line data

            4. Disconnected from trading:
                a. Stop all strategies (live)
                b. Notify the UI of the disconnected state
                c. Set the engine state to "disconnected from trading"
                d. Clear all trading data

            Note: once a strategy stops, all of its related data is cleared.

        '''
        #
        
    
    def _onApiExchange(self, apiEvent):  
        self._qteModel.updateExchange(apiEvent)

        self._sendEvent2Strategy(apiEvent.getStrategyId(), apiEvent)

        self._eg2uiQueue.put(apiEvent)
        if apiEvent.isChainEnd():
            self._pyApi.reqCommodity(Event({'StrategyId':0, 'Data':''}))
        
    def _onApiCommodity(self, apiEvent):
        self._qteModel.updateCommodity(apiEvent)
        self._eg2uiQueue.put(apiEvent)

        if apiEvent.isChainEnd():
            self._pyApi.reqContract(Event({'StrategyId':0, 'Data':''}))

        # Request the trading time template for each commodity
        dataList = apiEvent.getData()
        for dataDict in dataList:
            event = Event({
                'EventCode': EV_ST2EG_TIMEBUCKET_REQ,
                'StrategyId': apiEvent.getStrategyId(),
                'Data': dataDict['CommodityNo'],
            })
            self._pyApi.reqTimebucket(event)
        
    def _onApiContract(self, apiEvent):  
        self._qteModel.updateContract(apiEvent)
        self._eg2uiQueue.put(apiEvent)
        if apiEvent.isChainEnd():
            self._pyApi.reqQryLoginInfo(Event({'StrategyId':0, 'Data':''}))
        
    def _onApiTimeBucket(self, apiEvent):
        self._qteModel.updateTimeBucket(apiEvent)
        
    def _onApiSnapshot(self, apiEvent):
        self._qteModel.updateLv1(apiEvent)
        self._dispathQuote2Strategy(EV_EG2ST_SNAPSHOT_NOTICE, apiEvent)
        
    def _onApiDepthQuote(self, apiEvent):
        self._qteModel.updateLv2(apiEvent)
        self._dispathQuote2Strategy(EV_EG2ST_DEPTH_NOTICE, apiEvent)
        
    def _onApiKlinedataRsp(self, apiEvent):
        self._onApiKlinedata(apiEvent, EV_EG2ST_HISQUOTE_RSP)
        
    def _onApiKlinedataNotice(self, apiEvent):
        self._onApiKlinedata(apiEvent, EV_EG2ST_HISQUOTE_NOTICE)
        
    def _onApiKlinedata(self, apiEvent, code):
        self._hisModel.updateKline(apiEvent)
        strategyId = apiEvent.getStrategyId()
        # A strategy id of 0 means the data is pushed (broadcast) data
        apiData = apiEvent.getData()
        data = apiData[:]
        event = Event({
            'StrategyId' : strategyId,
            'EventCode'  : code,
            'ChainEnd'   : apiEvent.getChain(),
            'ContractNo' : apiEvent.getContractNo(),
            'KLineType'  : apiEvent.getKLineType(),
            'KLineSlice' : apiEvent.getKLineSlice(),
            'Data'       : data
        })
        
        if strategyId > 0:
            self._sendEvent2Strategy(strategyId, event)
            return
            
        # Pushed data: dispatch to subscribed strategies
        contNo = apiEvent.getContractNo()
        if contNo not in self._hisContStrategyDict:
            return

        stDict = self._hisContStrategyDict[contNo]
        for key in stDict:
            event.setStrategyId(key)
            self._sendEvent2Strategy(key, event)

    # User login info
    def _onApiLoginInfo(self, apiEvent):
        self._trdModel.updateLoginInfo(apiEvent)
        self._sendEvent2AllStrategy(apiEvent)

        if not apiEvent.isChainEnd():
            return       
        if not apiEvent.isSucceed():
            return

        self._trdModel.setStatus(TM_STATUS_LOGIN)
        self._reqUserInfo(Event({'StrategyId':0, 'Data':''}))

    # Account info
    def _onApiUserInfo(self, apiEvent):
        self._trdModel.updateUserInfo(apiEvent)
        self._eg2uiQueue.put(apiEvent)
        # print("++++++ 账户信息 引擎 ++++++", apiEvent.getData())
        self._sendEvent2AllStrategy(apiEvent)

        if not apiEvent.isChainEnd():
            return       
        if not apiEvent.isSucceed():
            return
        
        self._trdModel.setStatus(TM_STATUS_USER)
        # Query order info for all accounts
        eventList = self._trdModel.getOrderEvent()

        for event in eventList:
            # print("====== 查询所有账户下委托信息 ======", event.getData())
            self._reqOrder(event)
        
    def _onApiOrderDataQry(self, apiEvent):
        self._trdModel.updateOrderData(apiEvent)
        # print("++++++ 订单信息 引擎 查询 ++++++", apiEvent.getData())
        # TODO: 分块传递
        self._sendEvent2AllStrategy(apiEvent)

        if not apiEvent.isChainEnd():
            return
        if not apiEvent.isSucceed():
            return
            
        self._trdModel.setStatus(TM_STATUS_ORDER)
        # Query match (fill) info for all accounts
        eventList = self._trdModel.getMatchEvent()
        for event in eventList:
            self._reqMatch(event)
        
    def _onApiOrderData(self, apiEvent):
        # Order info
        self._trdModel.updateOrderData(apiEvent)
        # print("++++++ order info (engine, update) ++++++", apiEvent.getData())
        # TODO: send in chunks
        self._sendEvent2AllStrategy(apiEvent)
        
    def _onApiMatchDataQry(self, apiEvent):
        self._trdModel.updateMatchData(apiEvent)
        # print("++++++ 成交信息 引擎 查询 ++++++", apiEvent.getData())
        # TODO: 分块传递
        self._sendEvent2AllStrategy(apiEvent)

        if not apiEvent.isChainEnd():
            return
        if not apiEvent.isSucceed():
            return
            
        self._trdModel.setStatus(TM_STATUS_MATCH)
        # Query position info for all accounts
        eventList = self._trdModel.getPositionEvent()
        for event in eventList:
            self._reqPosition(event)
            
    def _onApiMatchData(self, apiEvent):
        # Match (fill) info
        self._trdModel.updateMatchData(apiEvent)
        # print("++++++ match info (engine, update) ++++++", apiEvent.getData())
        # TODO: send in chunks
        self._sendEvent2AllStrategy(apiEvent)
        
    def _onApiPosDataQry(self, apiEvent):
        self._trdModel.updatePosData(apiEvent)
        # print("++++++ 持仓信息 引擎 查询 ++++++", apiEvent.getData())
        # TODO: 分块传递
        self._sendEvent2AllStrategy(apiEvent)

        if not apiEvent.isChainEnd():
            return
        if not apiEvent.isSucceed():
            return
            
        self._trdModel.setStatus(TM_STATUS_POSITION)
        
        # Basic trade data queries are complete; start polling funds periodically
        self._createMoneyTimer()
            
    def _onApiPosData(self, apiEvent):
        # Position info
        self._trdModel.updatePosData(apiEvent)
        # print("++++++ position info (engine, update) ++++++", apiEvent.getData())
        # TODO: send in chunks
        self._sendEvent2AllStrategy(apiEvent)

    def _onApiMoney(self, apiEvent):
        # Funds info
        self._trdModel.updateMoney(apiEvent)
        # print("++++++ funds info (engine) ++++++", apiEvent.getData())
        self._sendEvent2AllStrategy(apiEvent)

    def _onOrderSessionMap(self, event):
        self._sendEvent2Strategy(event.getStrategyId(), event)

    def _reqTradeInfo(self, event):
        '''
        Query account info; if the user is not logged in, Data is returned empty
        '''
        strategyId = event.getStrategyId()
        if len(self._trdModel._loginInfo) == 0:
            trdEvent = Event({
                'EventCode': EV_EG2ST_TRADEINFO_RSP,
                'StrategyId': strategyId,
                'Data': '',
            })
            self._sendEvent2Strategy(strategyId, trdEvent)
            return 0

        data = {
            'loginInfo' : {}, # Login account info
            'userInfo'  : {}, # Funds account info
        }
        # Login account info
        loginInfoDict = {}
        for userNo, tLoginModel in self._trdModel._loginInfo.items():
            loginInfoDict[userNo] = tLoginModel.copyLoginInfoMetaData()
        data['loginInfo'] = loginInfoDict

        # Funds account info
        userInfoDict = {}
        for userNo, tUserInfoModel in self._trdModel._userInfo.items():
            userInfoDict[userNo] = tUserInfoModel.formatUserInfo()
        data['userInfo'] = userInfoDict

        trdEvent = Event({
            'EventCode': EV_EG2ST_TRADEINFO_RSP,
            'StrategyId': strategyId,
            'Data': data,
        })
        self._sendEvent2Strategy(strategyId, trdEvent)

    # /////////////// Strategy process events //////////////////
    def _addSubscribe(self, contractNo, strategyId):
        stDict = self._contStrategyDict[contractNo]
        # Already subscribed
        if strategyId in stDict:
            return
        stDict[strategyId] = None
            
    def _sendQuote(self, contractNo, strategyId):
        event = self._qteModel.getQuoteEvent(contractNo, strategyId)
        self._sendEvent2Strategy(strategyId, event)

    def _onExchange(self, event):
        '''Query exchange info'''
        revent = self._qteModel.getExchange()
        self._sendEvent2Strategy(event.getStrategyId(), revent)

    def _reqCommodity(self, event):
        '''Query commodity info'''
        revent = self._qteModel.getCommodity()
        self._sendEvent2Strategy(event.getStrategyId(), revent)
    
    def _reqSubQuote(self, event):
        '''Subscribe to real-time quotes'''
        contractList = event.getData()
        strategyId = event.getStrategyId()
        
        subList = []
        for contractNo in contractList:
            if contractNo not in self._contStrategyDict:
                subList.append(contractNo)
                self._contStrategyDict[contractNo] = {strategyId:None}
            else:
                if strategyId in self._contStrategyDict[contractNo]:
                    continue # already subscribed; nothing to do
                self._contStrategyDict[contractNo][strategyId] = None
                self._sendQuote(contractNo, strategyId)
        
        if len(subList) > 0:
            event.setData(subList)
            self._pyApi.reqSubQuote(event)
    
    def _reqUnsubQuote(self, event):
        '''Unsubscribe from real-time quotes'''
        strategyId = event.getStrategyId()
        contractList = event.getData()
        
        unSubList = []
        for contNo in contractList:
            if contNo not in self._contStrategyDict:
                continue # this contract has no subscription
            stDict = self._contStrategyDict[contNo]
            if strategyId not in stDict:
                continue # this strategy has no subscription
            stDict.pop(strategyId)
            # no subscribers left, so unsubscribe
            if len(stDict) <= 0:
                unSubList.append(contNo)
                
        if len(unSubList) > 0:
            event.setData(unSubList)
            self._pyApi.reqUnsubQuote(event)
        
    # def _reqTimebucket(self, event):
    #     '''Query the time template'''
    #     self._pyApi.reqTimebucket(event)
        
    def _reqSubHisquote(self, event): 
        '''Subscribe to historical quotes'''
        data = event.getData()
        if data['NeedNotice'] == EEQU_NOTICE_NOTNEED:
            self._pyApi.reqSubHisquote(event)
            return
        
        strategyId = event.getStrategyId()
        data = event.getData()
        contNo = data['ContractNo']

        if contNo not in self._hisContStrategyDict:
            self._hisContStrategyDict[contNo] = {strategyId:None}
        
        stDict = self._hisContStrategyDict[contNo]  
        if strategyId not in stDict:
            stDict[strategyId] = None
            
        self._pyApi.reqSubHisquote(event)
        
    def _reqUnsubHisquote(self, event):
        '''Unsubscribe from historical quotes'''
        strategyId = event.getStrategyId()
        data = event.getData()
        contNo = data['ContractNo']
        
        if contNo not in self._hisContStrategyDict:
            return # this contract has no subscription
        stDict = self._hisContStrategyDict[contNo]

        if strategyId not in stDict:
            return # this strategy has no subscription
        stDict.pop(strategyId)
        # no subscribers left, so unsubscribe
        unSubList = []
        if len(stDict) <= 0:
            unSubList.append(contNo)
        if len(unSubList) > 0:
            self._pyApi.reqUnsubHisquote(event)
        
    def _reqKLineStrategySwitch(self, event):
        '''Switch the strategy chart'''
        self._pyApi.reqKLineStrategySwitch(event)
        
    def _reqKLineDataResult(self, event):
        '''Push backtest K-line data'''
        self._pyApi.reqKLineDataResult(event)
        
    def _reqKLineDataResultNotice(self, event):
        '''Update live K-line data'''
        self._pyApi.reqKLineDataResultNotice(event)
        
    def _reqAddKLineSeriesInfo(self, event):
        '''Add indicator series data'''
        self._pyApi.addSeries(event)
        
    def _reqKLineSeriesResult(self, event):
        '''Push backtest indicator data'''
        self._pyApi.sendSeries(event)
        
    def _reqAddKLineSignalInfo(self, event):
        '''Add signal data'''
        self._pyApi.addSignal(event)
        
    def _reqKLineSignalResult(self, event):
        '''Push backtest signal data'''
        self._pyApi.sendSignal(event)
        
    def _reqStrategyDataUpdateNotice(self, event):
        '''Notify that indicators and signals were refreshed'''
        self._pyApi.reqStrategyDataUpdateNotice(event)

    def _reportResponse(self, event):
        #print(" engine 进程,收到策略进程的report 结果,并向ui传递")
        self._eg2uiQueue.put(event)

    def _checkResponse(self, event):
        #print(" engine 进程,收到策略进程的检查结果,并向ui传递")
        self._eg2uiQueue.put(event)

    def _monitorResponse(self, event):
        self._eg2uiQueue.put(event)
    ################################ Trade requests #########################
    def _reqUserInfo(self, event):
        self._pyApi.reqQryUserInfo(event)
        
    def _reqOrder(self, event):
        self._pyApi.reqQryOrder(event)
        
    def _reqMatch(self, event):
        self._pyApi.reqQryMatch(event)
        
    def _reqPosition(self, event):
        self._pyApi.reqQryPosition(event)
         
    def _reqMoney(self, event):
        self._pyApi.reqQryMoney(event)

    def _sendOrder(self, event):
        # Place an order: send the order request
        self._pyApi.reqInsertOrder(event)

    def _deleteOrder(self, event):
        # Cancel an order
        self._pyApi.reqCancelOrder(event)

    def _modifyOrder(self, event):
        # Modify an order
        self._pyApi.reqModifyOrder(event)

    def _sendKLineData(self, event):
        '''Push K-line data'''
        if event.getEventCode() == EV_ST2EG_NOTICE_KLINEDATA:
            self._pyApi.sendKLineData(event, 'N')
        elif event.getEventCode() == EV_ST2EG_UPDATE_KLINEDATA:
            self._pyApi.sendKLineData(event, 'U')

    def _addSeries(self, event):
        '''Add an indicator series'''
        self._pyApi.addSeries(event)

    def _sendKLineSeries(self, event):
        '''Push indicator data'''
        if event.getEventCode() == EV_ST2EG_NOTICE_KLINESERIES:
            self._pyApi.sendKLineSeries(event, 'N')
        elif event.getEventCode() == EV_ST2EG_UPDATE_KLINESERIES:
            self._pyApi.sendKLineSeries(event, 'U')

    def _addSignal(self, event):
        '''Add a signal series'''
        self._pyApi.addSignal(event)

    def _sendKLineSignal(self, event):
        '''Push signal data'''
        if event.getEventCode() == EV_ST2EG_NOTICE_KLINESIGNAL:
            self._pyApi.sendKLineSignal(event, 'N')
        elif event.getEventCode() == EV_ST2EG_UPDATE_KLINESIGNAL:
            self._pyApi.sendKLineSignal(event, 'U')

    # Pause the current strategy
    def _onStrategyPause(self, event):
        self._sendEvent2Strategy(event.getStrategyId(), event)

    # Resume the current strategy
    def _onStrategyResume(self, event):
        self._sendEvent2Strategy(event.getStrategyId(), event)

    # When the platform exits, send an event to all strategies
    def _onEquantExit(self, event):
        # self._sendEvent2AllStrategy(event)
        import json
        # Save the strategy context to a file
        context = self._strategy.getEnvironment()
        with open('StrategyContext.json', 'w', encoding='utf-8') as jsonFile:
            json.dump(context, jsonFile, ensure_ascii=False, indent=4)
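
A minimal usage sketch of how a UI-side process might drive this engine through its two queues; it assumes the Event wrapper, StrategyEngine and the EV_UI2EG_LOADSTRATEGY code used above are importable from the project, and the 'Data' payload shown here is purely illustrative:

# Hypothetical wiring sketch for the engine queues (not part of the project API).
from multiprocessing import Process, Queue

def engine_main(logger, eg2uiQueue, ui2egQueue):
    # Runs in the child process: build the engine there and enter its loop.
    engine = StrategyEngine(logger, eg2uiQueue, ui2egQueue)
    engine.run()

def start_engine(logger):
    eg2uiQueue = Queue()   # Engine -> UI: funds, reports, status
    ui2egQueue = Queue()   # UI -> Engine: strategy loading, pause/resume
    proc = Process(target=engine_main, args=(logger, eg2uiQueue, ui2egQueue))
    proc.start()
    # Ask the engine to load a strategy; dicts are wrapped into Event objects
    # by _handleUIData, and responses arrive on eg2uiQueue.
    ui2egQueue.put({'EventCode': EV_UI2EG_LOADSTRATEGY,
                    'StrategyId': 0,
                    'Data': {'Path': 'my_strategy.py'}})   # payload is an assumption
    return proc, eg2uiQueue, ui2egQueue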
Ejemplo n.º 46
0
def runspice(cirfile,batch=True):

    print( "*** " + cirfile)
    """ ltspice path """
    exe = os.path.join("C:\\","Program Files","LTC","LTspiceIV","scad3.exe")
    exe = os.path.join("C:\\","Program Files (x86)","LTC","LTspiceIV","scad3.exe")
    exe = os.path.join("C:\\","Program Files","LTC","LTspiceXVII","XVIIx64.exe")
    #test = os.path.join(os.environ['HOME'],".wine","drive_c", \
    #        "Program Files (x86)","LTC","LTspiceIV","scad3.exe")
    #if os.path.isfile(test) :
    #    exe = os.path.join("C:\\","Program Files (x86)","LTC","LTspiceIV","scad3.exe")

    """ linux/wine scad call """
    scad = ["wine",exe,'-wine','-b']

    """ for windows """
    windows = False
    #print( sys.platform)
    if sys.platform == "darwin":
        #exe = os.path.join("C:","Program\ Files","LTC","LTspiceXVII","XVIIx86.exe")

        exe = os.path.join(os.environ['HOME'],".wine","drive_c", \
                "Program Files (x86)","LTC","LTspiceIV","scad3.exe")
        exe = os.path.join(os.environ['HOME'],".wine","drive_c", \
                "Program Files (x86)","LTC","LTspiceXVII","XVIIx86.exe")
        scad = ["wine",exe,'-wine','-b']
        scad = ['wine C:/Program\ Files/LTC/LTspiceXVII/XVIIx86.exe -wine -b']
        print( scad)
    elif "win" in sys.platform :
        #print( "Windows")
        windows = True
        scad = [exe,'-b']
        if not batch:
            scad = [exe]
    elif "linux" in sys.platform :
        if 'Microsoft' in platform.uname().release:
            #print ("Windows Subsystem for Linux")
            exe = "/mnt/c/Program\ Files/LTC/LTspiceXVII/XVIIx64.exe"
            #print( "linux")
            windows = True
            scad = [exe,'-b']
            if not batch:
                scad = [exe]
  
    if not os.path.isfile(exe):
        print( exe)

    """
    TODO: 
    LTSPICE has to be run in whatever path the cir file is sitting or the library links dont work
    """
    ln = cirfile.split(".")
    extension = ln.pop()
    basename = ".".join(ln)
    logfile = basename + '.log'

    # set up a subprocess to start LTspice
    spiceMsgQueue = Queue()
    spice = Process(target=runLTspice, args=(cirfile,scad,spiceMsgQueue))

    # remove the old log files
    if(not os.path.isfile(cirfile)):
        print( "the cirfile %s does not exist" % cirfile)
        return []
    if(os.path.isfile(logfile)): os.remove(logfile)

    spice.start()
    
    # set up a subprocess to follow the log file
    parent_conn, child_conn = Pipe()
    follow = Process(target=tail, args=(logfile,child_conn))
    follow.start()

    spiceDone = False
    log = []
    while not spiceDone:
        try:
            xdone = spiceMsgQueue.get_nowait()
            if(xdone == "Done"):
                spiceDone = True
                break
        except Exception:
            pass

        if parent_conn.poll():
            rcv = parent_conn.recv()
            log.append(rcv)
            #print( "%s" % rcv)
            ##sys.stdout.write("%-80s\r" % rcv)
        time.sleep(0.01)
    
    parent_conn.send("Done")
    while parent_conn.poll():
        rcv = parent_conn.recv()
        #print( "%s" % rcv)
        log.append(rcv)
        #time.sleep(0.02)

    spice.join()
    follow.join()
    return log
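
runspice relies on two helpers that are not shown here, runLTspice and tail. A minimal sketch of what they might look like, with signatures inferred from the call sites above; the details are assumptions, not the original implementation:

# Hypothetical sketch of the helpers used by runspice.
import os
import subprocess
import time

def runLTspice(cirfile, scad, msgQueue):
    # Run LTspice in batch mode on the .cir file, then signal completion.
    subprocess.call(scad + [cirfile])
    msgQueue.put("Done")

def tail(logfile, conn):
    # Follow the log file and send each new line through the Pipe until the
    # parent sends "Done".
    while not os.path.isfile(logfile):
        time.sleep(0.01)
    with open(logfile, 'r', errors='ignore') as f:
        while True:
            if conn.poll() and conn.recv() == "Done":
                break
            line = f.readline()
            if line:
                conn.send(line.rstrip())
            else:
                time.sleep(0.01)
    conn.close()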
Ejemplo n.º 47
0
class RedisListener(GObject.GObject):
    """
RedisListener:
Uses the blocking pubsub.listen() on another process, when a message arrives
it is sent to the parent and a 'message' signal is emitted.

The 'redis' parameter is a connection like the one from  calling redis.Redis()
    """
    __gsignals__ = {
        'message': (GObject.SIGNAL_RUN_FIRST, None, [GObject.TYPE_PYOBJECT]),
    }

    def __init__(self, redis, client_id=None):
        GObject.GObject.__init__(self)
        if client_id is None:
            self.client_id = unicode(uuid.uuid4())
        else:
            self.client_id = client_id

        pubsub = redis.pubsub()
        pubsub.subscribe('__RedisListener_' + unicode(self.client_id))

        self.pubsub = pubsub
        self.queue = Queue()

        GLib.timeout_add(50, self.check_queue)
        self.create_worker()

    def create_worker(self):
        kwargs = {
            'pubsub': self.pubsub,
            'queue': self.queue,
            'client_id': self.client_id
        }
        self.worker = Process(target=self.worker_fn, kwargs=kwargs)
        self.worker.start()

    def worker_fn(self,
                  pubsub=None,
                  queue=None,
                  client_id=None,
                  *args,
                  **kwargs):
        """This is executed on *another* process to sidestep blocking reads.
        As soon as we get something we send it to the other side to be consumed later.
        """
        g = pubsub.listen()
        while True:
            try:
                msg = g.next()
                queue.put(msg)
            except StopIteration:
                logging.error('Redis: got StopIteration')
                return  # the listen() generator is exhausted; stop the worker

    def check_queue(self):
        """This is executed on the main process, when something arrives we just
        emit a signal.
        """
        while not self.queue.empty():
            try:
                msg = self.queue.get_nowait()
                if msg['type'] == 'message':
                    self.emit('message', msg)
            except Empty:
                pass
        return True

    def subscribe(self, channel):
        self.pubsub.subscribe(channel)

    def unsusbcribe(self, channel):
        self.pubsub.unsubscribe(channel)
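
A minimal usage sketch, assuming a Redis server on localhost and a GLib main loop to drive the periodic check_queue poll; the channel name is arbitrary:

# Hypothetical usage sketch for RedisListener (not part of the original example).
import redis
from gi.repository import GLib

def on_message(listener, msg):
    # Called on the main loop whenever the worker forwards a pubsub message.
    print('got message: %r' % (msg,))

r = redis.Redis()
listener = RedisListener(r)
listener.subscribe('news')
listener.connect('message', on_message)

# From any other client:  r.publish('news', 'hello')  triggers the 'message' signal.
GLib.MainLoop().run()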
Ejemplo n.º 48
0
def download_interesting_files(token, scan_context: ScanningContext):
    """
    Downloads files which may be interesting to an attacker. Searches for certain keywords then downloads.
    """

    print(
        termcolor.colored(
            "START: Attempting to locate and download interesting files (this may take some time)",
            "white", "on_blue"))
    download_directory = scan_context.output_directory + '/downloads'
    pathlib.Path(download_directory).mkdir(parents=True, exist_ok=True)

    completed_file_names = Queue()
    file_requests = []
    unique_file_id = set()

    # strips out characters which, though accepted in Slack, aren't accepted in Windows
    bad_chars_re = re.compile('[/:*?"<>|\\\]')  # Windows doesn't like "/ \ : * ? < > " or |
    # Accumulates the number of pages of results for each query
    page_counts_by_query = dict()
    common_file_dl_params = (token, scan_context.user_agent,
                             download_directory, completed_file_names)
    try:
        query_header = {'User-Agent': scan_context.user_agent}
        for query in INTERESTING_FILE_QUERIES:
            while True:
                request_url = "https://slack.com/api/search.files"
                params = dict(token=token,
                              query="\"{}\"".format(query),
                              pretty=1,
                              count=100)
                response_json = requests.get(request_url,
                                             params=params,
                                             headers=query_header).json()
                if not sleep_if_rate_limited(response_json):
                    break
            page_counts_by_query[query] = response_json['files']['pagination'][
                'page_count']

        for query, page_count in page_counts_by_query.items():
            page = 1
            while page <= page_count:
                request_url = "https://slack.com/api/search.files"
                params = dict(token=token,
                              query="\"{}\"".format(query),
                              pretty=1,
                              count=100,
                              page=str(page))
                response_json = requests.get(request_url,
                                             params=params,
                                             headers=query_header).json()
                sleep_if_rate_limited(response_json)
                new_files = [
                    new_file for new_file in response_json['files']['matches']
                    if new_file['id'] not in unique_file_id
                ]
                for new_file in new_files:
                    unique_file_id.add(new_file['id'])
                    file_name = new_file['id'] + "-" + new_file['name']
                    safe_filename = bad_chars_re.sub(
                        '_', file_name
                    )  # use underscores to replace tricky characters
                    file_dl_args = (new_file['url_private'],
                                    safe_filename) + common_file_dl_params
                    file_requests.append(
                        Process(target=_download_file, args=file_dl_args))
                page += 1

        # Now actually start the requests
        if file_requests:
            print(
                termcolor.colored(
                    "INFO: Retrieving {} files...".format(len(file_requests)),
                    "white", "on_blue"))
            file_batches = (
                file_requests[i:i + DOWNLOAD_BATCH_SIZE]
                for i in range(0, len(file_requests), DOWNLOAD_BATCH_SIZE))
            for batch in file_batches:
                _retrieve_file_batch(batch, completed_file_names)

        while not completed_file_names.empty():  # Print out any final results
            print(
                termcolor.colored(completed_file_names.get_nowait(), "white",
                                  "on_green"))

    except requests.exceptions.RequestException as exception:
        print(termcolor.colored(exception, "white", "on_red"))

    if file_requests:
        print(
            termcolor.colored(
                "END: Downloaded {} files to: ./{}/downloads".format(
                    len(file_requests), scan_context.output_directory),
                "white", "on_blue"))
    else:
        print(
            termcolor.colored("END: No interesting files discovered.", "white",
                              "on_blue"))
        print('\n')
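
The function above calls two helpers that are not shown, _download_file and _retrieve_file_batch. A minimal sketch consistent with the call sites above; the request headers and exact behaviour are assumptions:

# Hypothetical sketch of the helpers assumed by download_interesting_files.
import requests

def _download_file(url, safe_filename, token, user_agent, download_directory,
                   completed_file_names):
    # Fetch a single private file and report the saved name via the queue.
    headers = {'Authorization': 'Bearer ' + token, 'User-Agent': user_agent}
    response = requests.get(url, headers=headers)
    path = '{}/{}'.format(download_directory, safe_filename)
    with open(path, 'wb') as out_file:
        out_file.write(response.content)
    completed_file_names.put(safe_filename)

def _retrieve_file_batch(batch, completed_file_names):
    # Run one batch of download processes to completion, printing progress.
    for proc in batch:
        proc.start()
    for proc in batch:
        proc.join()
    while not completed_file_names.empty():
        print(completed_file_names.get_nowait())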
Ejemplo n.º 49
0
class TestRunnerManager(threading.Thread):
    def __init__(self,
                 suite_name,
                 test_queue,
                 test_source_cls,
                 browser_cls,
                 browser_kwargs,
                 executor_cls,
                 executor_kwargs,
                 stop_flag,
                 rerun=1,
                 pause_after_test=False,
                 pause_on_unexpected=False,
                 restart_on_unexpected=True,
                 debug_info=None,
                 capture_stdio=True,
                 recording=None):
        """Thread that owns a single TestRunner process and any processes required
        by the TestRunner (e.g. the Firefox binary).

        TestRunnerManagers are responsible for launching the browser process and the
        runner process, and for logging the test progress. The actual test running
        is done by the TestRunner. In particular they:

        * Start the binary of the program under test
        * Start the TestRunner
        * Tell the TestRunner to start a test, if any
        * Log that the test started
        * Log the test results
        * Take any remedial action required e.g. restart crashed or hung
          processes
        """
        self.suite_name = suite_name

        self.test_source = test_source_cls(test_queue)

        self.browser_cls = browser_cls
        self.browser_kwargs = browser_kwargs

        self.executor_cls = executor_cls
        self.executor_kwargs = executor_kwargs

        # Flags used to shut down this thread if we get a sigint
        self.parent_stop_flag = stop_flag
        self.child_stop_flag = multiprocessing.Event()

        self.rerun = rerun
        self.run_count = 0
        self.pause_after_test = pause_after_test
        self.pause_on_unexpected = pause_on_unexpected
        self.restart_on_unexpected = restart_on_unexpected
        self.debug_info = debug_info

        self.manager_number = next_manager_number()
        assert recording is not None
        self.recording = recording

        self.command_queue = Queue()
        self.remote_queue = Queue()

        self.test_runner_proc = None

        threading.Thread.__init__(self,
                                  name="TestRunnerManager-%i" %
                                  self.manager_number)
        # This is started in the actual new thread
        self.logger = None

        self.test_count = 0
        self.unexpected_count = 0

        # This may not really be what we want
        self.daemon = True

        self.timer = None

        self.max_restarts = 5

        self.browser = None

        self.capture_stdio = capture_stdio

    def run(self):
        """Main loop for the TestRunnerManager.

        TestRunnerManagers generally receive commands from their
        TestRunner updating them on the status of a test. They
        may also have a stop flag set by the main thread indicating
        that the manager should shut down the next time the event loop
        spins."""
        self.recording.set(["testrunner", "startup"])
        self.logger = structuredlog.StructuredLogger(self.suite_name)
        with self.browser_cls(self.logger, **self.browser_kwargs) as browser:
            self.browser = BrowserManager(self.logger,
                                          browser,
                                          self.command_queue,
                                          no_timeout=self.debug_info
                                          is not None)
            dispatch = {
                RunnerManagerState.before_init: self.start_init,
                RunnerManagerState.initializing: self.init,
                RunnerManagerState.running: self.run_test,
                RunnerManagerState.restarting: self.restart_runner
            }

            self.state = RunnerManagerState.before_init()
            end_states = (RunnerManagerState.stop, RunnerManagerState.error)

            try:
                while not isinstance(self.state, end_states):
                    f = dispatch.get(self.state.__class__)
                    while f:
                        self.logger.debug("Dispatch %s" % f.__name__)
                        if self.should_stop():
                            return
                        new_state = f()
                        if new_state is None:
                            break
                        self.state = new_state
                        self.logger.debug("new state: %s" %
                                          self.state.__class__.__name__)
                        if isinstance(self.state, end_states):
                            return
                        f = dispatch.get(self.state.__class__)

                    new_state = None
                    while new_state is None:
                        new_state = self.wait_event()
                        if self.should_stop():
                            return
                    self.state = new_state
                    self.logger.debug("new state: %s" %
                                      self.state.__class__.__name__)
            except Exception:
                self.logger.error(traceback.format_exc())
                raise
            finally:
                self.logger.debug(
                    "TestRunnerManager main loop terminating, starting cleanup"
                )
                clean = isinstance(self.state, RunnerManagerState.stop)
                self.stop_runner(force=not clean)
                self.teardown()
        self.logger.debug("TestRunnerManager main loop terminated")

    def wait_event(self):
        dispatch = {
            RunnerManagerState.before_init: {},
            RunnerManagerState.initializing: {
                "init_succeeded": self.init_succeeded,
                "init_failed": self.init_failed,
            },
            RunnerManagerState.running: {
                "test_ended": self.test_ended,
                "wait_finished": self.wait_finished,
            },
            RunnerManagerState.restarting: {},
            RunnerManagerState.error: {},
            RunnerManagerState.stop: {},
            None: {
                "runner_teardown": self.runner_teardown,
                "log": self.log,
                "error": self.error
            }
        }
        try:
            command, data = self.command_queue.get(True, 1)
            self.logger.debug("Got command: %r" % command)
        except IOError:
            self.logger.error("Got IOError from poll")
            return RunnerManagerState.restarting(0)
        except Empty:
            if (self.debug_info and self.debug_info.interactive
                    and self.browser.started and not self.browser.is_alive()):
                self.logger.debug("Debugger exited")
                return RunnerManagerState.stop()

            if (isinstance(self.state, RunnerManagerState.running)
                    and not self.test_runner_proc.is_alive()):
                if not self.command_queue.empty():
                    # We got a new message so process that
                    return

                # If we got to here the runner presumably shut down
                # unexpectedly
                self.logger.info("Test runner process shut down")

                if self.state.test is not None:
                    # This could happen if the test runner crashed for some other
                    # reason
                    # Need to consider the unlikely case where one test causes the
                    # runner process to repeatedly die
                    self.logger.critical("Last test did not complete")
                    return RunnerManagerState.error()
                self.logger.warning(
                    "More tests found, but runner process died, restarting")
                return RunnerManagerState.restarting(0)
        else:
            f = (dispatch.get(self.state.__class__, {}).get(command)
                 or dispatch.get(None, {}).get(command))
            if not f:
                self.logger.warning("Got command %s in state %s" %
                                    (command, self.state.__class__.__name__))
                return
            return f(*data)

    def should_stop(self):
        return self.child_stop_flag.is_set() or self.parent_stop_flag.is_set()

    def start_init(self):
        test, test_group, group_metadata = self.get_next_test()
        self.recording.set(["testrunner", "init"])
        if test is None:
            return RunnerManagerState.stop()
        else:
            return RunnerManagerState.initializing(test, test_group,
                                                   group_metadata, 0)

    def init(self):
        assert isinstance(self.state, RunnerManagerState.initializing)
        if self.state.failure_count > self.max_restarts:
            self.logger.critical("Max restarts exceeded")
            return RunnerManagerState.error()

        self.browser.update_settings(self.state.test)

        result = self.browser.init(self.state.group_metadata)
        if result is Stop:
            return RunnerManagerState.error()
        elif not result:
            return RunnerManagerState.initializing(
                self.state.test, self.state.test_group,
                self.state.group_metadata, self.state.failure_count + 1)
        else:
            self.executor_kwargs["group_metadata"] = self.state.group_metadata
            self.start_test_runner()

    def start_test_runner(self):
        # Note that we need to be careful to start the browser before the
        # test runner to ensure that any state set when the browser is started
        # can be passed in to the test runner.
        assert isinstance(self.state, RunnerManagerState.initializing)
        assert self.command_queue is not None
        assert self.remote_queue is not None
        self.logger.info("Starting runner")
        executor_browser_cls, executor_browser_kwargs = self.browser.browser.executor_browser(
        )

        args = (self.remote_queue, self.command_queue, self.executor_cls,
                self.executor_kwargs, executor_browser_cls,
                executor_browser_kwargs, self.capture_stdio,
                self.child_stop_flag)
        self.test_runner_proc = Process(target=start_runner,
                                        args=args,
                                        name="TestRunner-%i" %
                                        self.manager_number)
        self.test_runner_proc.start()
        self.logger.debug("Test runner started")
        # Now we wait for either an init_succeeded event or an init_failed event

    def init_succeeded(self):
        assert isinstance(self.state, RunnerManagerState.initializing)
        self.browser.after_init()
        return RunnerManagerState.running(self.state.test,
                                          self.state.test_group,
                                          self.state.group_metadata)

    def init_failed(self):
        assert isinstance(self.state, RunnerManagerState.initializing)
        self.browser.check_crash(None)
        self.browser.after_init()
        self.stop_runner(force=True)
        return RunnerManagerState.initializing(self.state.test,
                                               self.state.test_group,
                                               self.state.group_metadata,
                                               self.state.failure_count + 1)

    def get_next_test(self, test_group=None):
        test = None
        while test is None:
            while test_group is None or len(test_group) == 0:
                test_group, group_metadata = self.test_source.group()
                if test_group is None:
                    self.logger.info("No more tests")
                    return None, None, None
            test = test_group.popleft()
        self.run_count = 0
        return test, test_group, group_metadata

    def run_test(self):
        assert isinstance(self.state, RunnerManagerState.running)
        assert self.state.test is not None

        if self.browser.update_settings(self.state.test):
            self.logger.info("Restarting browser for new test environment")
            return RunnerManagerState.restarting(self.state.test,
                                                 self.state.test_group,
                                                 self.state.group_metadata)

        self.recording.set(["testrunner", "test"] +
                           self.state.test.id.split("/")[1:])
        self.logger.test_start(self.state.test.id)
        if self.rerun > 1:
            self.logger.info("Run %d/%d" % (self.run_count, self.rerun))
            self.send_message("reset")
        self.run_count += 1
        if self.debug_info is None:
            # Factor of 3 on the extra timeout here is based on allowing the executor
            # at least test.timeout + 2 * extra_timeout to complete,
            # which in turn is based on having several layers of timeout inside the executor
            wait_timeout = (self.state.test.timeout *
                            self.executor_kwargs['timeout_multiplier'] +
                            3 * self.executor_cls.extra_timeout)
            self.timer = threading.Timer(wait_timeout, self._timeout)

        self.send_message("run_test", self.state.test)
        if self.timer:
            self.timer.start()

    def _timeout(self):
        # This is executed in a different thread (threading.Timer).
        self.logger.info("Got timeout in harness")
        test = self.state.test
        self.inject_message(
            "test_ended",
            test,
            (test.result_cls(
                "EXTERNAL-TIMEOUT", "TestRunner hit external timeout "
                "(this may indicate a hang)"), []),
        )

    def test_ended(self, test, results):
        """Handle the end of a test.

        Output the result of each subtest, and the result of the overall
        harness to the logs.
        """
        if ((not isinstance(self.state, RunnerManagerState.running))
                or (test != self.state.test)):
            # Due to inherent race conditions in EXTERNAL-TIMEOUT, we might
            # receive multiple test_ended for a test (e.g. from both Executor
            # and TestRunner), in which case we ignore the duplicate message.
            self.logger.error("Received unexpected test_ended for %s" % test)
            return
        if self.timer is not None:
            self.timer.cancel()
        # Write the result of each subtest
        file_result, test_results = results
        subtest_unexpected = False
        expect_any_subtest_status = test.expect_any_subtest_status()
        if expect_any_subtest_status:
            self.logger.debug("Ignoring subtest statuses for test %s" %
                              test.id)
        for result in test_results:
            if test.disabled(result.name):
                continue
            if expect_any_subtest_status:
                expected = result.status
            else:
                expected = test.expected(result.name)
            known_intermittent = test.known_intermittent(result.name)
            is_unexpected = expected != result.status and result.status not in known_intermittent

            if is_unexpected:
                self.unexpected_count += 1
                self.logger.debug("Unexpected count in this thread %i" %
                                  self.unexpected_count)
                subtest_unexpected = True
            self.logger.test_status(test.id,
                                    result.name,
                                    result.status,
                                    message=result.message,
                                    expected=expected,
                                    known_intermittent=known_intermittent,
                                    stack=result.stack)

        # We have a couple of status codes that are used internally, but not exposed to the
        # user. These are used to indicate that some possibly-broken state was reached
        # and we should restart the runner before the next test.
        # INTERNAL-ERROR indicates a Python exception was caught in the harness
        # EXTERNAL-TIMEOUT indicates we had to forcibly kill the browser from the harness
        # because the test didn't return a result after reaching the test-internal timeout
        status_subns = {
            "INTERNAL-ERROR": "ERROR",
            "EXTERNAL-TIMEOUT": "TIMEOUT"
        }
        expected = test.expected()
        known_intermittent = test.known_intermittent()
        status = status_subns.get(file_result.status, file_result.status)

        if self.browser.check_crash(test.id) and status != "CRASH":
            self.logger.info(
                "Found a crash dump; should change status from %s to CRASH but this causes instability"
                % (status, ))

        self.test_count += 1
        is_unexpected = expected != status and status not in known_intermittent
        if is_unexpected:
            self.unexpected_count += 1
            self.logger.debug("Unexpected count in this thread %i" %
                              self.unexpected_count)

        if "assertion_count" in file_result.extra:
            assertion_count = file_result.extra.pop("assertion_count")
            if assertion_count > 0:
                self.logger.assertion_count(test.id, int(assertion_count),
                                            test.min_assertion_count,
                                            test.max_assertion_count)

        file_result.extra[
            "test_timeout"] = test.timeout * self.executor_kwargs[
                'timeout_multiplier']

        self.logger.test_end(test.id,
                             status,
                             message=file_result.message,
                             expected=expected,
                             known_intermittent=known_intermittent,
                             extra=file_result.extra,
                             stack=file_result.stack)

        restart_before_next = (test.restart_after or file_result.status
                               in ("CRASH", "EXTERNAL-TIMEOUT",
                                   "INTERNAL-ERROR")
                               or ((subtest_unexpected or is_unexpected)
                                   and self.restart_on_unexpected))

        self.recording.set(["testrunner", "after-test"])
        if (not file_result.status == "CRASH" and self.pause_after_test
                or (self.pause_on_unexpected and
                    (subtest_unexpected or is_unexpected))):
            self.logger.info("Pausing until the browser exits")
            self.send_message("wait")
        else:
            return self.after_test_end(test, restart_before_next)

    def wait_finished(self):
        assert isinstance(self.state, RunnerManagerState.running)
        self.logger.debug("Wait finished")

        # The browser should be stopped already, but this ensures we do any
        # post-stop processing
        return self.after_test_end(self.state.test, True)

    def after_test_end(self, test, restart):
        assert isinstance(self.state, RunnerManagerState.running)
        if self.run_count == self.rerun:
            test, test_group, group_metadata = self.get_next_test()
            if test is None:
                return RunnerManagerState.stop()
            if test_group != self.state.test_group:
                # We are starting a new group of tests, so force a restart
                restart = True
        else:
            test_group = self.state.test_group
            group_metadata = self.state.group_metadata
        if restart:
            return RunnerManagerState.restarting(test, test_group,
                                                 group_metadata)
        else:
            return RunnerManagerState.running(test, test_group, group_metadata)

    def restart_runner(self):
        """Stop and restart the TestRunner"""
        assert isinstance(self.state, RunnerManagerState.restarting)
        self.stop_runner()
        return RunnerManagerState.initializing(self.state.test,
                                               self.state.test_group,
                                               self.state.group_metadata, 0)

    def log(self, action, kwargs):
        getattr(self.logger, action)(**kwargs)

    def error(self, message):
        self.logger.error(message)
        self.restart_runner()

    def stop_runner(self, force=False):
        """Stop the TestRunner and the browser binary."""
        self.recording.set(["testrunner", "stop_runner"])
        if self.test_runner_proc is None:
            return

        if self.test_runner_proc.is_alive():
            self.send_message("stop")
        try:
            self.browser.stop(force=force)
            self.ensure_runner_stopped()
        finally:
            self.cleanup()

    def teardown(self):
        self.logger.debug("TestRunnerManager teardown")
        self.test_runner_proc = None
        self.command_queue.close()
        self.remote_queue.close()
        self.command_queue = None
        self.remote_queue = None
        self.recording.pause()

    def ensure_runner_stopped(self):
        self.logger.debug("ensure_runner_stopped")
        if self.test_runner_proc is None:
            return

        self.browser.stop(force=True)
        self.logger.debug("waiting for runner process to end")
        self.test_runner_proc.join(10)
        self.logger.debug("After join")
        if self.test_runner_proc.is_alive():
            # This might leak a file handle from the queue
            self.logger.warning("Forcibly terminating runner process")
            self.test_runner_proc.terminate()

            # Multiprocessing queues are backed by operating system pipes. If
            # the pipe in the child process had buffered data at the time of
            # forced termination, the queue is no longer in a usable state
            # (subsequent attempts to retrieve items may block indefinitely).
            # Discard the potentially-corrupted queue and create a new one.
            self.command_queue.close()
            self.command_queue = Queue()
            self.remote_queue.close()
            self.remote_queue = Queue()
        else:
            self.logger.debug("Runner process exited with code %i" %
                              self.test_runner_proc.exitcode)

    def runner_teardown(self):
        self.ensure_runner_stopped()
        return RunnerManagerState.stop()

    def send_message(self, command, *args):
        """Send a message to the remote queue (to Executor)."""
        self.remote_queue.put((command, args))

    def inject_message(self, command, *args):
        """Inject a message to the command queue (from Executor)."""
        self.command_queue.put((command, args))

    def cleanup(self):
        self.logger.debug("TestRunnerManager cleanup")
        if self.browser:
            self.browser.cleanup()
        while True:
            try:
                cmd, data = self.command_queue.get_nowait()
            except Empty:
                break
            else:
                if cmd == "log":
                    self.log(*data)
                elif cmd == "runner_teardown":
                    # It's OK for the "runner_teardown" message to be left in
                    # the queue during cleanup, as we will already have tried
                    # to stop the TestRunner in `stop_runner`.
                    pass
                else:
                    self.logger.warning(
                        "Command left in command_queue during cleanup: %r, %r"
                        % (cmd, data))
        while True:
            try:
                cmd, data = self.remote_queue.get_nowait()
                self.logger.warning(
                    "Command left in remote_queue during cleanup: %r, %r" %
                    (cmd, data))
            except Empty:
                break
Ejemplo n.º 50
0
class AnalyzerDev(Thread):
    """
    The Analyzer class which controls the analyzer thread and spawned processes.
    """
    def __init__(self, parent_pid):
        """
        Initialize the Analyzer

        Create the :obj:`self.anomalous_metrics` list

        Create the :obj:`self.exceptions_q` queue

        Create the :obj:`self.anomaly_breakdown_q` queue

        """
        super(AnalyzerDev, self).__init__()
        # @modified 20180519 - Feature #2378: Add redis auth to Skyline and rebrow
        if settings.REDIS_PASSWORD:
            self.redis_conn = StrictRedis(
                password=settings.REDIS_PASSWORD,
                unix_socket_path=settings.REDIS_SOCKET_PATH)
        else:
            self.redis_conn = StrictRedis(
                unix_socket_path=settings.REDIS_SOCKET_PATH)
        self.daemon = True
        self.parent_pid = parent_pid
        self.current_pid = getpid()
        self.anomalous_metrics = Manager().list()
        self.exceptions_q = Queue()
        self.anomaly_breakdown_q = Queue()
        # @modified 20160813 - Bug #1558: Memory leak in Analyzer
        # Not used
        # self.mirage_metrics = Manager().list()
        # @added 20160923 - Branch #922: Ionosphere
        self.mirage_metrics = Manager().list()
        self.ionosphere_metrics = Manager().list()
        # @added 20161119 - Branch #922: ionosphere
        #                   Task #1718: review.tsfresh
        # Send a breakdown of what metrics were sent to other apps
        self.sent_to_mirage = Manager().list()
        self.sent_to_crucible = Manager().list()
        self.sent_to_panorama = Manager().list()
        self.sent_to_ionosphere = Manager().list()
        # @added 20161229 - Feature #1830: Ionosphere alerts
        self.all_anomalous_metrics = Manager().list()
        # @added 20170108 - Feature #1830: Ionosphere alerts
        # Adding lists of smtp_alerter_metrics and non_smtp_alerter_metrics
        self.smtp_alerter_metrics = Manager().list()
        self.non_smtp_alerter_metrics = Manager().list()
        # @added 20180903 - Feature #2580: illuminance
        #                   Feature #1986: flux
        self.illuminance_datapoints = Manager().list()
        # @added 20190408 - Feature #2882: Mirage - periodic_check
        self.mirage_periodic_check_metrics = Manager().list()
        self.real_anomalous_metrics = Manager().list()

    def check_if_parent_is_alive(self):
        """
        Self explanatory
        """
        try:
            kill(self.current_pid, 0)
            kill(self.parent_pid, 0)
        except:
            exit(0)

    def spawn_alerter_process(self, alert, metric, context):
        """
        Spawn a process to trigger an alert.

        This is used by smtp alerters so that matplotlib objects are cleared
        down and the alerter cannot create a memory leak in this manner, as
        plt.savefig keeps the object in memory until the process terminates.
        Seeing as data is being surfaced and processed in the alert_smtp
        context, multiprocessing the alert creation and handling prevents any
        memory leaks in the parent process.

        Added 20160814 relating to:

        * Bug #1558: Memory leak in Analyzer
        * Issue #21 Memory leak in Analyzer see https://github.com/earthgecko/skyline/issues/21

        Parameters as per :py:func:`skyline.analyzer.alerters.trigger_alert
        <analyzer.alerters.trigger_alert>`

        """

        trigger_alert(alert, metric, context)
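        # Illustrative usage (not part of this fragment): the parent run loop
        # would typically spawn this method as its own short-lived process so
        # that any matplotlib state dies with the child, along the lines of:
        #     p = Process(target=self.spawn_alerter_process,
        #                 args=(alert, metric, context))
        #     p.start()
        #     p.join()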

    def spin_process(self, i, unique_metrics):
        """
        Assign a bunch of metrics for a process to analyze.

        Multi-get the assigned_metrics for the process from Redis.

        For each metric:

        - unpack the `raw_timeseries` for the metric.
        - Analyse each timeseries against `ALGORITHMS` to determine if it is
          anomalous.
        - If anomalous, add it to the :obj:`self.anomalous_metrics` list
        - Add what algorithms triggered to the :obj:`self.anomaly_breakdown_q`
          queue
        - If :mod:`settings.ENABLE_CRUCIBLE` is ``True``:

          - Add a crucible data file with the details about the timeseries and
            anomaly.
          - Write the timeseries to a json file for crucible.

        Add keys and values to the queue so the parent process can collate for:\n
        * :py:obj:`self.anomaly_breakdown_q`
        * :py:obj:`self.exceptions_q`
        """

        spin_start = time()
        logger.info('spin_process started')
        if LOCAL_DEBUG:
            logger.info('debug :: Memory usage spin_process start: %s (kb)' %
                        resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)

        # TESTING removal of p.join() from p.terminate()
        # sleep(4)

        # @modified 20160801 - Adding additional exception handling to Analyzer
        # Check the unique_metrics list is valid
        try:
            len(unique_metrics)
        except:
            logger.error('error :: the unique_metrics list is not valid')
            logger.info(traceback.format_exc())
            logger.info('nothing to do, no unique_metrics')
            return

        # Discover assigned metrics
        keys_per_processor = int(
            ceil(
                float(len(unique_metrics)) /
                float(settings.ANALYZER_PROCESSES)))
        if i == settings.ANALYZER_PROCESSES:
            assigned_max = len(unique_metrics)
        else:
            assigned_max = min(len(unique_metrics), i * keys_per_processor)
        # Fix analyzer worker metric assignment #94
        # https://github.com/etsy/skyline/pull/94 @languitar:worker-fix
        assigned_min = (i - 1) * keys_per_processor
        assigned_keys = range(assigned_min, assigned_max)
        # assigned_keys = range(300, 310)
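        # Worked example of the assignment above: with 10 unique_metrics and
        # ANALYZER_PROCESSES = 3, keys_per_processor = ceil(10 / 3) = 4, so
        # process 1 gets indices 0-3, process 2 gets indices 4-7 and process 3
        # (the last process) gets the remainder, indices 8-9.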

        # Compile assigned metrics
        assigned_metrics = [unique_metrics[index] for index in assigned_keys]
        if LOCAL_DEBUG:
            logger.info(
                'debug :: Memory usage spin_process after assigned_metrics: %s (kb)'
                % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)

        # @added 20190410 - Feature #2916: ANALYZER_ENABLED setting
        if not ANALYZER_ENABLED:
            len_assigned_metrics = len(assigned_metrics)
            logger.info(
                'ANALYZER_ENABLED is set to %s removing the %s assigned_metrics'
                % (str(ANALYZER_ENABLED), str(len_assigned_metrics)))
            assigned_metrics = []
            del unique_metrics

        # Check if this process is unnecessary
        if len(assigned_metrics) == 0:
            return

        # Multi get series
        # @modified 20160801 - Adding additional exception handling to Analyzer
        raw_assigned_failed = True
        try:
            raw_assigned = self.redis_conn.mget(assigned_metrics)
            raw_assigned_failed = False
            if LOCAL_DEBUG:
                logger.info(
                    'debug :: Memory usage spin_process after raw_assigned: %s (kb)'
                    % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)
        except:
            logger.info(traceback.format_exc())
            logger.error('error :: failed to get assigned_metrics from Redis')

        # Make process-specific dicts
        exceptions = defaultdict(int)
        anomaly_breakdown = defaultdict(int)

        # @added 20160803 - Adding additional exception handling to Analyzer
        if raw_assigned_failed:
            return

        # @added 20161119 - Branch #922: ionosphere
        #                   Task #1718: review.tsfresh
        # Determine the unique Mirage and Ionosphere metrics once, which are
        # used later to determine how Analyzer should handle/route anomalies
        try:
            mirage_unique_metrics = list(
                self.redis_conn.smembers('mirage.unique_metrics'))
        except:
            mirage_unique_metrics = []

        # @added 20190408 - Feature #2882: Mirage - periodic_check
        # Add Mirage periodic checks so that Mirage is analysing each metric at
        # least once per hour.
        mirage_periodic_check_metric_list = []
        try:
            mirage_periodic_check_enabled = settings.MIRAGE_PERIODIC_CHECK
        except:
            mirage_periodic_check_enabled = False
        try:
            mirage_periodic_check_interval = settings.MIRAGE_PERIODIC_CHECK_INTERVAL
        except:
            mirage_periodic_check_interval = 3600
        mirage_periodic_check_interval_minutes = int(
            int(mirage_periodic_check_interval) / 60)
        if mirage_unique_metrics and mirage_periodic_check_enabled:
            mirage_unique_metrics_count = len(mirage_unique_metrics)
            # Mirage periodic checks are only done on declared namespaces, as
            # processing all Mirage metrics periodically would probably create
            # a substantial load on Graphite and is probably not required; only
            # key metrics should be analysed by Mirage periodically.
            periodic_check_mirage_metrics = []
            try:
                mirage_periodic_check_namespaces = settings.MIRAGE_PERIODIC_CHECK_NAMESPACES
            except:
                mirage_periodic_check_namespaces = []
            for metric_name in mirage_unique_metrics:
                metric_namespace_elements = metric_name.split('.')
                mirage_periodic_metric = False
                for periodic_namespace in mirage_periodic_check_namespaces:
                    periodic_namespace_namespace_elements = periodic_namespace.split(
                        '.')
                    elements_matched = set(
                        metric_namespace_elements) & set(
                            periodic_namespace_namespace_elements)
                    if len(elements_matched) == len(
                            periodic_namespace_namespace_elements):
                        mirage_periodic_metric = True
                        break
                if mirage_periodic_metric:
                    if metric_name not in periodic_check_mirage_metrics:
                        periodic_check_mirage_metrics.append(metric_name)
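            # Example of the namespace matching above: a declared periodic
            # check namespace of 'stats.web' splits into ['stats', 'web'], so
            # a metric such as 'stats.web.host1.requests' matches because both
            # namespace elements appear in the metric's dotted elements.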

            periodic_check_mirage_metrics_count = len(
                periodic_check_mirage_metrics)
            logger.info('there are %s known Mirage periodic metrics' %
                        (str(periodic_check_mirage_metrics_count)))
            for metric_name in periodic_check_mirage_metrics:
                try:
                    self.redis_conn.sadd(
                        'new.mirage.periodic_check.metrics.all', metric_name)
                except Exception as e:
                    logger.error(
                        'error :: could not add %s to Redis set new.mirage.periodic_check.metrics.all: %s'
                        % (metric_name, e))
            try:
                self.redis_conn.rename(
                    'mirage.periodic_check.metrics.all',
                    'mirage.periodic_check.metrics.all.old')
            except:
                pass
            try:
                self.redis_conn.rename('new.mirage.periodic_check.metrics.all',
                                       'mirage.periodic_check.metrics.all')
            except:
                pass
            try:
                self.redis_conn.delete('mirage.periodic_check.metrics.all.old')
            except:
                pass

            if periodic_check_mirage_metrics_count > mirage_periodic_check_interval_minutes:
                mirage_periodic_checks_per_minute = periodic_check_mirage_metrics_count / mirage_periodic_check_interval_minutes
            else:
                mirage_periodic_checks_per_minute = 1
            logger.info('%s Mirage periodic checks can be added' %
                        (str(int(mirage_periodic_checks_per_minute))))
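            # For example, with 300 periodic Mirage metrics and the default
            # 3600 second (60 minute) interval, 300 / 60 = 5 checks are
            # scheduled per run (roughly one run per minute), so the whole set
            # is covered within the interval.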
            for metric_name in periodic_check_mirage_metrics:
                if len(mirage_periodic_check_metric_list) == int(
                        mirage_periodic_checks_per_minute):
                    break
                base_name = metric_name.replace(settings.FULL_NAMESPACE, '', 1)
                mirage_periodic_check_cache_key = 'mirage.periodic_check.%s' % base_name
                mirage_periodic_check_key = False
                try:
                    mirage_periodic_check_key = self.redis_conn.get(
                        mirage_periodic_check_cache_key)
                except Exception as e:
                    logger.error(
                        'error :: could not query Redis for cache_key: %s' % e)
                if not mirage_periodic_check_key:
                    try:
                        key_created_at = int(time())
                        self.redis_conn.setex(mirage_periodic_check_cache_key,
                                              mirage_periodic_check_interval,
                                              key_created_at)
                        logger.info(
                            'created Mirage periodic_check Redis key - %s' %
                            (mirage_periodic_check_cache_key))
                        mirage_periodic_check_metric_list.append(metric_name)
                        try:
                            self.redis_conn.sadd(
                                'new.mirage.periodic_check.metrics',
                                metric_name)
                        except Exception as e:
                            logger.error(
                                'error :: could not add %s to Redis set new.mirage.periodic_check.metrics: %s'
                                % (metric_name, e))
                    except:
                        logger.error(traceback.format_exc())
                        logger.error(
                            'error :: failed to create Mirage periodic_check Redis key - %s'
                            % (mirage_periodic_check_cache_key))
            try:
                self.redis_conn.rename('mirage.periodic_check.metrics',
                                       'mirage.periodic_check.metrics.old')
            except:
                pass
            try:
                self.redis_conn.rename('new.mirage.periodic_check.metrics',
                                       'mirage.periodic_check.metrics')
            except:
                pass
            try:
                self.redis_conn.delete('mirage.periodic_check.metrics.old')
            except:
                pass
            mirage_periodic_check_metric_list_count = len(
                mirage_periodic_check_metric_list)
            logger.info('%s Mirage periodic checks were added' %
                        (str(mirage_periodic_check_metric_list_count)))

        try:
            ionosphere_unique_metrics = list(
                self.redis_conn.smembers('ionosphere.unique_metrics'))
        except:
            ionosphere_unique_metrics = []

        # @added 20170602 - Feature #2034: analyse_derivatives
        # In order to convert monotonic, incrementing metrics to a derivative
        # metric
        try:
            derivative_metrics = list(
                self.redis_conn.smembers('derivative_metrics'))
        except:
            derivative_metrics = []
        try:
            non_derivative_metrics = list(
                self.redis_conn.smembers('non_derivative_metrics'))
        except:
            non_derivative_metrics = []
        # This is here to refresh the sets
        try:
            manage_derivative_metrics = self.redis_conn.get(
                'analyzer.derivative_metrics_expiry')
        except Exception as e:
            if LOCAL_DEBUG:
                logger.error(
                    'error :: could not query Redis for analyzer.derivative_metrics_expiry key: %s'
                    % str(e))
            manage_derivative_metrics = False

        # @added 20170901 - Bug #2154: Infrequent missing new_ Redis keys
        # If the analyzer.derivative_metrics_expiry is going to expire in the
        # next 60 seconds, just manage the derivative_metrics in the run as
        # there is an overlap some times where the key existed at the start of
        # the run but has expired by the end of the run.
        derivative_metrics_expiry_ttl = False
        if manage_derivative_metrics:
            try:
                derivative_metrics_expiry_ttl = self.redis_conn.ttl(
                    'analyzer.derivative_metrics_expiry')
                logger.info(
                    'the analyzer.derivative_metrics_expiry key ttl is %s' %
                    str(derivative_metrics_expiry_ttl))
            except Exception as e:
                logger.error(
                    'error :: could not query Redis for analyzer.derivative_metrics_expiry key: %s'
                    % str(e))
            if derivative_metrics_expiry_ttl:
                if int(derivative_metrics_expiry_ttl) < 60:
                    logger.info(
                        'managing derivative_metrics as the analyzer.derivative_metrics_expiry key ttl is less than 60 with %s'
                        % str(derivative_metrics_expiry_ttl))
                    manage_derivative_metrics = False
                    try:
                        self.redis_conn.delete(
                            'analyzer.derivative_metrics_expiry')
                        logger.info(
                            'deleted the Redis key analyzer.derivative_metrics_expiry'
                        )
                    except:
                        logger.error(
                            'error :: failed to delete Redis key :: analyzer.derivative_metrics_expiry'
                        )

        try:
            non_derivative_monotonic_metrics = settings.NON_DERIVATIVE_MONOTONIC_METRICS
        except:
            non_derivative_monotonic_metrics = []

        # @added 20180519 - Feature #2378: Add redis auth to Skyline and rebrow
        # Added Redis sets for Boring, TooShort and Stale
        redis_set_errors = 0

        # Distill timeseries strings into lists
        for i, metric_name in enumerate(assigned_metrics):
            self.check_if_parent_is_alive()

            # logger.info('analysing %s' % metric_name)

            try:
                raw_series = raw_assigned[i]
                unpacker = Unpacker(use_list=False)
                unpacker.feed(raw_series)
                timeseries = list(unpacker)
            except:
                timeseries = []

            # @added 20200507 - Feature #3532: Sort all time series
            # To ensure that there are no unordered timestamps in the time
            # series which are artefacts of the collector or carbon-relay, sort
            # all time series by timestamp before analysis.
            original_timeseries = timeseries
            if original_timeseries:
                timeseries = sort_timeseries(original_timeseries)
                del original_timeseries

            # @added 20170602 - Feature #2034: analyse_derivatives
            # In order to convert monotonic, incrementing metrics to a derivative
            # metric
            known_derivative_metric = False
            unknown_deriv_status = True
            if metric_name in non_derivative_metrics:
                unknown_deriv_status = False
            if unknown_deriv_status:
                if metric_name in derivative_metrics:
                    known_derivative_metric = True
                    unknown_deriv_status = False
            # This is here to refresh the sets
            if not manage_derivative_metrics:
                unknown_deriv_status = True

            base_name = metric_name.replace(settings.FULL_NAMESPACE, '', 1)

            # @added 20170617 - Bug #2050: analyse_derivatives - change in monotonicity
            # First check if it has its own Redis z.derivative_metric key
            # that has not expired
            derivative_metric_key = 'z.derivative_metric.%s' % str(base_name)

            if unknown_deriv_status:
                # @added 20170617 - Bug #2050: analyse_derivatives - change in monotonicity
                last_derivative_metric_key = False
                try:
                    last_derivative_metric_key = self.redis_conn.get(
                        derivative_metric_key)
                except Exception as e:
                    logger.error(
                        'error :: could not query Redis for last_derivative_metric_key: %s'
                        % e)

                # Determine if it is a strictly increasing monotonically metric
                # or has been in last FULL_DURATION via its z.derivative_metric
                # key
                if not last_derivative_metric_key:
                    is_strictly_increasing_monotonically = strictly_increasing_monotonicity(
                        timeseries)
                    if is_strictly_increasing_monotonically:
                        try:
                            last_expire_set = int(time())
                            self.redis_conn.setex(derivative_metric_key,
                                                  settings.FULL_DURATION,
                                                  last_expire_set)
                        except Exception as e:
                            logger.error(
                                'error :: could not set Redis derivative_metric key: %s'
                                % e)
                else:
                    # Until the z.derivative_metric key expires, it is classed
                    # as such
                    is_strictly_increasing_monotonically = True

                skip_derivative = in_list(base_name,
                                          non_derivative_monotonic_metrics)
                if skip_derivative:
                    is_strictly_increasing_monotonically = False
                if is_strictly_increasing_monotonically:
                    known_derivative_metric = True
                    try:
                        self.redis_conn.sadd('derivative_metrics', metric_name)
                    except:
                        logger.info(traceback.format_exc())
                        logger.error(
                            'error :: failed to add metric to Redis derivative_metrics set'
                        )
                    try:
                        self.redis_conn.sadd('new_derivative_metrics',
                                             metric_name)
                    except:
                        logger.info(traceback.format_exc())
                        logger.error(
                            'error :: failed to add metric to Redis new_derivative_metrics set'
                        )
                else:
                    try:
                        self.redis_conn.sadd('non_derivative_metrics',
                                             metric_name)
                    except:
                        logger.info(traceback.format_exc())
                        logger.error(
                            'error :: failed to add metric to Redis non_derivative_metrics set'
                        )
                    try:
                        self.redis_conn.sadd('new_non_derivative_metrics',
                                             metric_name)
                    except:
                        logger.info(traceback.format_exc())
                        logger.error(
                            'error :: failed to add metric to Redis new_non_derivative_metrics set'
                        )
            if known_derivative_metric:
                try:
                    derivative_timeseries = nonNegativeDerivative(timeseries)
                    timeseries = derivative_timeseries
                except:
                    logger.error('error :: nonNegativeDerivative failed')
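            # e.g. a monotonically increasing counter with values
            # [100, 105, 112] is converted to per-interval deltas such as
            # [5, 7] by nonNegativeDerivative, so negative steps from counter
            # resets are not passed through as negative values.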

            # @added 20180903 - Feature #2580: illuminance
            #                   Feature #1986: flux
            try:
                illuminance_datapoint = timeseries[-1][1]
                if '.illuminance' not in metric_name:
                    self.illuminance_datapoints.append(illuminance_datapoint)
            except:
                pass

            try:
                anomalous, ensemble, datapoint = run_selected_algorithm(
                    timeseries, metric_name)

                # @added 20190408 - Feature #2882: Mirage - periodic_check
                # Add for Mirage periodic - is really anomalous add to
                # real_anomalous_metrics and if in mirage_periodic_check_metric_list
                # add as anomalous
                if anomalous:
                    # @modified 20190412 - Bug #2932: self.real_anomalous_metrics not being populated correctly
                    #                      Feature #2882: Mirage - periodic_check
                    # self.real_anomalous_metrics.append(base_name)
                    base_name = metric_name.replace(settings.FULL_NAMESPACE,
                                                    '', 1)
                    metric_timestamp = timeseries[-1][0]
                    metric = [datapoint, base_name, metric_timestamp]
                    self.real_anomalous_metrics.append(metric)
                if metric_name in mirage_periodic_check_metric_list:
                    self.mirage_periodic_check_metrics.append(base_name)
                    anomalous = True

                # If it's anomalous, add it to list
                if anomalous:
                    base_name = metric_name.replace(settings.FULL_NAMESPACE,
                                                    '', 1)
                    metric_timestamp = timeseries[-1][0]
                    metric = [datapoint, base_name, metric_timestamp]
                    self.anomalous_metrics.append(metric)

                    # Get the anomaly breakdown - who returned True?
                    triggered_algorithms = []
                    for index, value in enumerate(ensemble):
                        if value:
                            algorithm = settings.ALGORITHMS[index]
                            anomaly_breakdown[algorithm] += 1
                            triggered_algorithms.append(algorithm)
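                    # Illustration: if settings.ALGORITHMS were
                    # ['a', 'b', 'c'] and ensemble were [True, False, True],
                    # anomaly_breakdown['a'] and anomaly_breakdown['c'] would
                    # each be incremented and triggered_algorithms would be
                    # ['a', 'c'].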

            # It could have been deleted by the Roomba
            except TypeError:
                # logger.error('TypeError analysing %s' % metric_name)
                exceptions['DeletedByRoomba'] += 1
            except TooShort:
                # logger.error('TooShort analysing %s' % metric_name)
                exceptions['TooShort'] += 1
            except Stale:
                # logger.error('Stale analysing %s' % metric_name)
                exceptions['Stale'] += 1
            except Boring:
                # logger.error('Boring analysing %s' % metric_name)
                exceptions['Boring'] += 1
            except:
                # logger.error('Other analysing %s' % metric_name)
                exceptions['Other'] += 1
                logger.info(traceback.format_exc())

        # Add values to the queue so the parent process can collate
        for key, value in anomaly_breakdown.items():
            self.anomaly_breakdown_q.put((key, value))

        for key, value in exceptions.items():
            self.exceptions_q.put((key, value))

        spin_end = time() - spin_start
        logger.info('spin_process took %.2f seconds' % spin_end)

    def run(self):
        """
        Called when the process initializes.

        Determine if Redis is up and discover the number of `unique metrics`.

        Divide the `unique_metrics` between the number of `ANALYZER_PROCESSES`
        and assign each process a set of metrics to analyse for anomalies.

        Wait for the processes to finish.

        Determine whether any anomalous metrics require:\n
        * alerting on (and set `EXPIRATION_TIME` key in Redis for alert).\n
        * feeding to another module e.g. mirage.

        Populate the webapp json with the anomalous_metrics details.

        Log the details about the run to the skyline log.

        Send skyline.analyzer metrics to `GRAPHITE_HOST`.
        """

        # Log management to prevent overwriting
        # Allow the bin/<skyline_app>.d to manage the log
        if os.path.isfile(skyline_app_logwait):
            try:
                os.remove(skyline_app_logwait)
            except OSError:
                logger.error('error - failed to remove %s, continuing' %
                             skyline_app_logwait)
                pass

        now = time()
        log_wait_for = now + 5
        while now < log_wait_for:
            if os.path.isfile(skyline_app_loglock):
                sleep(.1)
                now = time()
            else:
                now = log_wait_for + 1

        logger.info('starting %s run' % skyline_app)
        if os.path.isfile(skyline_app_loglock):
            logger.error(
                'error - bin/%s.d log management seems to have failed, continuing'
                % skyline_app)
            try:
                os.remove(skyline_app_loglock)
                logger.info('log lock file removed')
            except OSError:
                logger.error('error - failed to remove %s, continuing' %
                             skyline_app_loglock)
                pass
        else:
            logger.info('bin/%s.d log management done' % skyline_app)

        if not os.path.exists(settings.SKYLINE_TMP_DIR):
            if python_version == 2:
                os.makedirs(settings.SKYLINE_TMP_DIR, 0o750)
            if python_version == 3:
                os.makedirs(settings.SKYLINE_TMP_DIR, mode=0o750)

        # Initiate the algorithm timings if Analyzer is configured to send the
        # algorithm_breakdown metrics with ENABLE_ALGORITHM_RUN_METRICS
        algorithm_tmp_file_prefix = settings.SKYLINE_TMP_DIR + '/' + skyline_app + '.'
        algorithms_to_time = []
        if send_algorithm_run_metrics:
            algorithms_to_time = settings.ALGORITHMS

        while 1:
            now = time()

            # Make sure Redis is up
            try:
                self.redis_conn.ping()
            except:
                logger.error(
                    'skyline can\'t connect to redis at socket path %s' %
                    settings.REDIS_SOCKET_PATH)
                sleep(10)
                # @modified 20180519 - Feature #2378: Add redis auth to Skyline and rebrow
                if settings.REDIS_PASSWORD:
                    self.redis_conn = StrictRedis(
                        password=settings.REDIS_PASSWORD,
                        unix_socket_path=settings.REDIS_SOCKET_PATH)
                else:
                    self.redis_conn = StrictRedis(
                        unix_socket_path=settings.REDIS_SOCKET_PATH)
                continue

            # Report app up
            self.redis_conn.setex(skyline_app, 120, now)

            # Discover unique metrics
            unique_metrics = list(
                self.redis_conn.smembers(settings.FULL_NAMESPACE +
                                         'unique_metrics'))

            if len(unique_metrics) == 0:
                logger.info(
                    'no metrics in redis. try adding some - see README')
                sleep(10)
                continue

            # Using count files rather than multiprocessing.Value to enable
            # metrics for algorithm run times, etc
            for algorithm in algorithms_to_time:
                algorithm_count_file = algorithm_tmp_file_prefix + algorithm + '.count'
                algorithm_timings_file = algorithm_tmp_file_prefix + algorithm + '.timings'
                # with open(algorithm_count_file, 'a') as f:
                with open(algorithm_count_file, 'w') as f:
                    pass
                with open(algorithm_timings_file, 'w') as f:
                    pass

            # Spawn processes
            pids = []
            pid_count = 0
            for i in range(1, settings.ANALYZER_PROCESSES + 1):
                if i > len(unique_metrics):
                    logger.info(
                        'WARNING: skyline is set for more cores than needed.')
                    break

                p = Process(target=self.spin_process, args=(i, unique_metrics))
                pids.append(p)
                pid_count += 1
                logger.info('starting %s of %s spin_process/es' %
                            (str(pid_count), str(settings.ANALYZER_PROCESSES)))
                p.start()

            # Send wait signal to zombie processes
            # for p in pids:
            #     p.join()
            # Self monitor processes and terminate if any spin_process has run
            # for longer than 180 seconds
            p_starts = time()
            while time() - p_starts <= 180:
                if any(p.is_alive() for p in pids):
                    # Just to avoid hogging the CPU
                    sleep(.1)
                else:
                    # All the processes are done, break now.
                    time_to_run = time() - p_starts
                    logger.info(
                        '%s :: %s spin_process/es completed in %.2f seconds' %
                        (skyline_app, str(
                            settings.ANALYZER_PROCESSES), time_to_run))
                    break
            else:
                # We only enter this if we didn't 'break' above.
                logger.info(
                    '%s :: timed out, killing all spin_process processes' %
                    (skyline_app))
                for p in pids:
                    p.terminate()
                    # p.join()

            # Grab data from the queue and populate dictionaries
            exceptions = dict()
            anomaly_breakdown = dict()
            while 1:
                try:
                    key, value = self.anomaly_breakdown_q.get_nowait()
                    if key not in anomaly_breakdown.keys():
                        anomaly_breakdown[key] = value
                    else:
                        anomaly_breakdown[key] += value
                except Empty:
                    break

            while 1:
                try:
                    key, value = self.exceptions_q.get_nowait()
                    if key not in exceptions.keys():
                        exceptions[key] = value
                    else:
                        exceptions[key] += value
                except Empty:
                    break

            # Push to panorama
            # if len(self.panorama_anomalous_metrics) > 0:
            #     logger.info('to do - push to panorama')

            # Push to crucible
            # if len(self.crucible_anomalous_metrics) > 0:
            #     logger.info('to do - push to crucible')

            # Write anomalous_metrics to static webapp directory

            # Using count files rather than multiprocessing.Value to enable
            # metrics for algorithm run times, etc
            for algorithm in algorithms_to_time:
                algorithm_count_file = algorithm_tmp_file_prefix + algorithm + '.count'
                algorithm_timings_file = algorithm_tmp_file_prefix + algorithm + '.timings'

                try:
                    algorithm_count_array = []
                    with open(algorithm_count_file, 'r') as f:
                        for line in f:
                            value_string = line.replace('\n', '')
                            unquoted_value_string = value_string.replace(
                                "'", '')
                            float_value = float(unquoted_value_string)
                            algorithm_count_array.append(float_value)
                except:
                    algorithm_count_array = False

                if not algorithm_count_array:
                    continue

                number_of_times_algorithm_run = len(algorithm_count_array)
                logger.info('algorithm run count - %s run %s times' %
                            (algorithm, str(number_of_times_algorithm_run)))
                if number_of_times_algorithm_run == 0:
                    continue

                try:
                    algorithm_timings_array = []
                    with open(algorithm_timings_file, 'r') as f:
                        for line in f:
                            value_string = line.replace('\n', '')
                            unquoted_value_string = value_string.replace(
                                "'", '')
                            float_value = float(unquoted_value_string)
                            algorithm_timings_array.append(float_value)
                except:
                    algorithm_timings_array = False

                if not algorithm_timings_array:
                    continue

                number_of_algorithm_timings = len(algorithm_timings_array)
                logger.info('algorithm timings count - %s has %s timings' %
                            (algorithm, str(number_of_algorithm_timings)))

                if number_of_algorithm_timings == 0:
                    continue

                try:
                    _sum_of_algorithm_timings = sum(algorithm_timings_array)
                except:
                    logger.error("sum error: " + traceback.format_exc())
                    _sum_of_algorithm_timings = round(0.0, 6)
                    logger.error('error - sum_of_algorithm_timings - %s' %
                                 (algorithm))
                    continue

                sum_of_algorithm_timings = round(_sum_of_algorithm_timings, 6)
                # logger.info('sum_of_algorithm_timings - %s - %.16f seconds' % (algorithm, sum_of_algorithm_timings))

                try:
                    _median_algorithm_timing = determine_median(
                        algorithm_timings_array)
                except:
                    _median_algorithm_timing = round(0.0, 6)
                    logger.error('error - _median_algorithm_timing - %s' %
                                 (algorithm))
                    continue
                median_algorithm_timing = round(_median_algorithm_timing, 6)
                # logger.info('median_algorithm_timing - %s - %.16f seconds' % (algorithm, median_algorithm_timing))

                logger.info(
                    'algorithm timing - %s - total: %.6f - median: %.6f' %
                    (algorithm, sum_of_algorithm_timings,
                     median_algorithm_timing))
                send_metric_name = 'algorithm_breakdown.' + algorithm + '.timing.times_run'
                self.send_graphite_metric(send_metric_name,
                                          '%d' % number_of_algorithm_timings)
                send_metric_name = 'algorithm_breakdown.' + algorithm + '.timing.total_time'
                self.send_graphite_metric(send_metric_name,
                                          '%.6f' % sum_of_algorithm_timings)
                send_metric_name = 'algorithm_breakdown.' + algorithm + '.timing.median_time'
                self.send_graphite_metric(send_metric_name,
                                          '%.6f' % median_algorithm_timing)

            # Log progress
            logger.info('seconds to run    :: %.2f' % (time() - now))
            logger.info('total metrics     :: %d' % len(unique_metrics))
            logger.info('total analyzed    :: %d' %
                        (len(unique_metrics) - sum(exceptions.values())))
            logger.info('total anomalies   :: %d' %
                        len(self.anomalous_metrics))
            logger.info('exception stats   :: %s' % exceptions)
            logger.info('anomaly breakdown :: %s' % anomaly_breakdown)

            # Log to Graphite
            self.send_graphite_metric('run_time', '%.2f' % (time() - now))
            self.send_graphite_metric(
                'total_analyzed',
                '%.2f' % (len(unique_metrics) - sum(exceptions.values())))
            self.send_graphite_metric('total_anomalies',
                                      '%d' % len(self.anomalous_metrics))
            self.send_graphite_metric('total_metrics',
                                      '%d' % len(unique_metrics))
            for key, value in exceptions.items():
                send_metric = 'exceptions.%s' % key
                self.send_graphite_metric(send_metric, '%d' % value)
            for key, value in anomaly_breakdown.items():
                send_metric = 'anomaly_breakdown.%s' % key
                self.send_graphite_metric(send_metric, '%d' % value)

            # Check canary metric
            raw_series = self.redis_conn.get(settings.FULL_NAMESPACE +
                                             settings.CANARY_METRIC)
            if raw_series is not None:
                unpacker = Unpacker(use_list=False)
                unpacker.feed(raw_series)
                timeseries = list(unpacker)

                # @added 20200507 - Feature #3532: Sort all time series
                # To ensure that there are no unordered timestamps in the time
                # series which are artefacts of the collector or carbon-relay, sort
                # all time series by timestamp before analysis.
                original_timeseries = timeseries
                if original_timeseries:
                    timeseries = sort_timeseries(original_timeseries)
                    del original_timeseries

                time_human = (timeseries[-1][0] - timeseries[0][0]) / 3600
                projected = 24 * (time() - now) / time_human
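                # For example, if the canary metric holds 6 hours of data
                # (time_human = 6) and this run took 30 seconds, the projected
                # time to analyse a full 24 hours of data is
                # 24 * 30 / 6 = 120 seconds.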

                logger.info('canary duration   :: %.2f' % time_human)
                self.send_graphite_metric('duration', '%.2f' % time_human)
                self.send_graphite_metric('projected', '%.2f' % projected)

            # Reset counters
            self.anomalous_metrics[:] = []

            # Sleep if it went too fast
            # if time() - now < 5:
            #    logger.info('sleeping due to low run time...')
            #    sleep(10)
            # @modified 20160504 - @earthgecko - (development internal ref #1338, #1340)
            # Etsy's original check used a value of 5 seconds, which does not
            # make skyline Analyzer very efficient for installations where
            # 100s of 1000s of metrics are being analyzed.  That led to
            # Analyzer running over the same metrics multiple times in a
            # minute and always working.  The original behaviour of only
            # sleeping if the run took less than 5 seconds meant that Analyzer
            # could analyse a few 1000 metrics in 9 seconds and then do it
            # again and again within a single minute.  Therefore the
            # ANALYZER_OPTIMUM_RUN_DURATION setting was added to allow this to
            # self optimise in cases where skyline is NOT deployed to analyze
            # 100s of 1000s of metrics.  This relates to optimising performance
            # for any deployments in the few 1000s and 60 second resolution
            # area, e.g. smaller and local deployments.
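            # For example, with ANALYZER_OPTIMUM_RUN_DURATION set to 60 and a
            # run that completed in 9.2 seconds, Analyzer sleeps for the
            # remaining 50.8 seconds so each run starts roughly once per
            # optimum duration rather than looping continuously.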
            process_runtime = time() - now
            analyzer_optimum_run_duration = settings.ANALYZER_OPTIMUM_RUN_DURATION
            if process_runtime < analyzer_optimum_run_duration:
                sleep_for = (analyzer_optimum_run_duration - process_runtime)
                # sleep_for = 60
                logger.info(
                    'sleeping for %.2f seconds due to low run time...' %
                    sleep_for)
                sleep(sleep_for)
Ejemplo n.º 51
0
__author__ = "Alex Li"

from multiprocessing import Process, Queue
import threading
#import queue

# def f(q):
#     q.put([42, None, 'hello'])


def f(qq):
    print("in child:", qq.qsize())
    qq.put([42, None, 'hello'])


if __name__ == '__main__':
    q = Queue()
    q.put("test123")
    #p = threading.Thread(target=f,)
    p = Process(target=f, args=(q, ))
    p.start()
    p.join()
    print("444", q.get_nowait())
    print("444", q.get_nowait())
    # prints "[42, None, 'hello']"
    #print(q.get())  # prints "[42, None, 'hello']"
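    # A third q.get_nowait() here would raise queue.Empty, as both of the
    # items that were put on the queue have already been consumed.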
Ejemplo n.º 52
0
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from multiprocessing import Queue, Process


def f(cq):
    cq.put(['my', 'name', 'is', ['lilei', 'xixi']])  # put an item on the queue


if __name__ == '__main__':
    mq = Queue()  # create a process queue instance
    mq.put('from main')  # put an item on the queue from the parent process
    p = Process(target=f, args=(mq, ))  # create a child process, passing it mq
    p.start()  # start the child process
    p.join()  # wait for the child process to finish
    print('444', mq.get_nowait())  # get an item from the queue
    print('444', mq.get_nowait())
Ejemplo n.º 53
0
def makeNetlist(ascfile):
    print( "*** " + ascfile)
    """ ltspice path """
    #exe = os.path.join("C:\\","Program Files","LTC","LTspiceIV","scad3.exe")
    #exe = os.path.join("C:\\","Program Files (x86)","LTC","LTspiceIV","scad3.exe")
    exe = os.path.join("C:\\","Program Files","LTC","LTspiceXVII","XVIIx64.exe")

    #try:
    #    test = os.path.join(os.environ['HOME'],".wine","drive_c", \
    #            "Program Files (x86)","LTC","LTspiceIV","scad3.exe")
    #except:
    #    pass
    #if os.path.isfile(test) :

    """ linux/wine scad call """
    scad = ["wine",exe,'-wine','-netlist']

    """ for windows """
    windows = False
    if sys.platform == "darwin":
        pass
    elif "win" in sys.platform:
        windows = True
        scad = [exe,'-netlist']
    elif "linux" in sys.platform:
        if 'Microsoft' in platform.uname().release:
            print ("Windows Subsystem for Linux")
            exe = "/mnt/c/Program\ Files/LTC/LTspiceXVII/XVIIx64.exe"
            windows = True
            scad = [exe,'-netlist']


    """
    TODO:
    LTspice has to be run in whatever path the cir file is sitting or the library links don't work
    """
    ln = ascfile.split(".")
    extension = ln.pop()
    basename = ".".join(ln)
    logfile = basename + '.log'

    #setup subprocess to start ltspice
    spiceMsgQueue = Queue()
    spice = Process(target=runLTspice, args=(ascfile,scad,spiceMsgQueue))

    #os.remove the old log files
    if(not os.path.isfile(ascfile)):
        print( "the ascfile %s does not exist" % ascfile)
        return []
    
    spice.start()
    spiceDone = False
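    # Poll the message queue until the runLTspice worker posts "Done".
    # get_nowait() raises an exception while the queue is still empty and the
    # bare except below simply swallows it, so this loop busy-waits until the
    # netlist run has finished.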
    while not spiceDone:
        try:
            xdone = spiceMsgQueue.get_nowait()
            if(xdone == "Done"):
                spiceDone = True
                break
        except: 
            pass

    spice.join()
    return 
Ejemplo n.º 54
0
    base_vel.angular.x = float(data[4])
    base_vel.angular.y = float(data[5])
    base_vel.angular.z = float(data[6])
    print "sending"
    pub.publish(base_vel)
    rate.sleep()


def send_serial(message_to_send):
    #time.sleep(0.013)
    if ser.isOpen():
        ser.write(message_to_send.encode())
    else:
        ser.open()
        ser.write(message_to_send.encode())


if __name__ == '__main__':
    while True:
        rospy.Subscriber('RosAria/pose', Odometry, odom_callback)

        if not q.empty():
            msg = q.get_nowait()
            send_serial(msg)

        if not serial_q.empty():
            serial_msg = serial_q.get_nowait()
            pub_cmd(serial_msg)
        else:
            serial_msg = ["1", "0.0", "0.0", "0.0", "0.0", "0.0", "0.0"]
            pub_cmd(serial_msg)
Ejemplo n.º 55
0
class deviceUpdater(object):
    def __init__(self, websocket, args, returning, db):
        self._websocket = websocket
        self._update_queue = Queue()
        self._update_mutex = RLock()
        self._db = db
        self._log = {}
        self._args = args
        self._commands: dict = {}
        self._globaljoblog: dict = {}
        self._current_job_id = []
        self._current_job_device = []
        self._returning = returning
        try:
            if os.path.exists('update_log.json'):
                with open('update_log.json') as logfile:
                    self._log = json.load(logfile)
        except json.decoder.JSONDecodeError:
            logger.error(
                'Corrupted update_log.json file found. Deleting the '
                'file. Please check remaining disk space or disk health.')
            os.remove('update_log.json')

        self.init_jobs()
        self.kill_old_jobs()
        self.load_automatic_jobs()

        self._stop_updater_threads: Event = Event()
        self.t_updater = []
        for i in range(self._args.job_thread_count):
            t = Thread(name='apk_updater-{}'.format(str(i)),
                       target=self.process_update_queue,
                       args=(i, ))
            t.daemon = True
            self.t_updater.append(t)
            t.start()
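        # The updater threads started above consume job ids from
        # self._update_queue in process_update_queue() until
        # self._stop_updater_threads is set via stop_updater().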

    def stop_updater(self):
        self._stop_updater_threads.set()
        for thread in self.t_updater:
            thread.join()

    def init_jobs(self):
        self._commands = {}
        if os.path.exists('commands.json'):
            with open('commands.json') as cmdfile:
                self._commands = json.loads(cmdfile.read())

        # load personal commands

        for file in glob.glob(os.path.join("personal_commands", "*.json")):
            try:
                with open(file) as personal_command:
                    personal_cmd = json.loads(personal_command.read())
                for command in personal_cmd:
                    if command in self._commands:
                        logger.error(
                            "Command {} already exists - skipping".format(
                                str(command)))
                    else:
                        logger.info(
                            'Loading personal command: {}'.format(command))
                        self._commands[command] = personal_cmd[command]
            except Exception as e:
                logger.error('Cannot add job {} - Reason: {}'.format(
                    str(file), str(e)))

    def return_commands(self):
        return self._commands

    @logger.catch()
    def restart_job(self, id_: int):
        if (id_) in self._log:
            origin = self._log[id_]['origin']
            file_ = self._log[id_]['file']
            jobtype = self._log[id_]['jobtype']
            globalid = self._log[id_]['globalid']
            redo = self._log[id_].get('redo', False)
            waittime = self._log[id_].get('waittime', 0)
            jobname = self._log[id_].get('jobname', None)

            if globalid not in self._globaljoblog:
                self._globaljoblog[globalid] = {}
                self._globaljoblog[globalid]['laststatus'] = None
                self._globaljoblog[globalid]['lastjobend'] = None

            if redo:
                algo = self.get_job_algo_value(algotyp=self._globaljoblog[globalid].get('algotype',
                                                                                        'flex'),
                                               algovalue=self._globaljoblog[globalid].get('algovalue',
                                                                                          0)) \
                       + waittime

                processtime = datetime.timestamp(datetime.now() +
                                                 timedelta(minutes=algo))

                self.write_status_log(str(id_),
                                      field='processingdate',
                                      value=processtime)
                self.add_job(globalid=globalid,
                             origin=origin,
                             file=file_,
                             id_=id_,
                             type=jobtype,
                             counter=0,
                             status='future',
                             waittime=waittime,
                             processtime=processtime,
                             redo=redo,
                             jobname=jobname)

            else:
                self.write_status_log(str(id_),
                                      field='processingdate',
                                      delete=True)
                self.add_job(globalid=globalid,
                             origin=origin,
                             file=file_,
                             id_=id_,
                             type=jobtype,
                             status='requeued',
                             jobname=jobname)

        return True

    def kill_old_jobs(self):
        logger.info("Checking for outdated jobs")
        for job in self._log.copy():
            if self._log[job]['status'] in (
                    'pending', 'starting', 'processing', 'not connected', 'future', 'not required') \
                    and not self._log[job].get('auto', False):
                logger.debug("Cancel job {} - it is outdated".format(str(job)))
                self.write_status_log(str(job),
                                      field='status',
                                      value='cancelled')
            elif self._log[job].get('auto', False):
                self.write_status_log(str(job), delete=True)

    @logger.catch()
    def process_update_queue(self, threadnumber):
        logger.info("Starting Device Job processor thread No {}".format(
            str(threadnumber)))
        time.sleep(10)
        while not self._stop_updater_threads.is_set():
            try:
                jobstatus = jobReturn.UNKNOWN
                try:
                    # item = self._update_queue.get()
                    item = self._update_queue.get_nowait()
                except Empty:
                    time.sleep(1)
                    continue
                if item is None:
                    time.sleep(1)
                    continue

                if item not in self._log:
                    continue

                id_ = item
                origin = self._log[str(id_)]['origin']

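                # Serialise jobs per device: if another job for this origin is
                # already being processed, requeue this job id and pick it up
                # again on a later pass.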
                self._update_mutex.acquire()
                try:
                    if origin in self._current_job_device:
                        self._update_queue.put(str(id_))
                        continue

                    self._current_job_device.append(origin)
                finally:
                    self._update_mutex.release()

                file_ = self._log[str(id_)]['file']
                counter = self._log[str(id_)]['counter']
                jobtype = self._log[str(id_)]['jobtype']
                waittime = self._log[str(id_)].get('waittime', 0)
                processtime = self._log[str(id_)].get('processingdate', None)
                globalid = self._log[str(id_)]['globalid']
                redo = self._log[str(id_)].get('redo', False)

                laststatus = self._globaljoblog[globalid]['laststatus']
                lastjobid = self._globaljoblog[globalid].get('lastjobid', 0)
                startwithinit = self._globaljoblog[globalid].get(
                    'startwithinit', False)

                if laststatus is not None and laststatus == 'faulty' and \
                        self._globaljoblog[globalid].get('autojob', False):
                    # breakup job because last job in chain is faulty
                    logger.error(
                        'Breakup job {} on device {} - File/Job: {} - previous job in chain was broken (ID: {})'
                        .format(str(jobtype), str(origin), str(file_),
                                str(id_)))
                    self.write_status_log(str(id_),
                                          field='status',
                                          value='terminated')
                    self.send_webhook(id_=id_, status=jobReturn.TERMINATED)
                    self._current_job_device.remove(origin)

                    continue

                if (
                        laststatus is None or laststatus == 'future') and not startwithinit and processtime is None and \
                        self._globaljoblog[globalid].get('autojob', False):
                    logger.debug(
                        'Autojob (no init run) {} on device {} - File/Job: {} - queued to real starttime (ID: {})'
                        .format(str(jobtype), str(origin), str(file_),
                                str(id_)))
                    # just schedule job - not process the first time
                    processtime = datetime.timestamp(datetime.now(
                    ) + timedelta(
                        minutes=self._globaljoblog[globalid].get('algo', 0) +
                        waittime))
                    self.write_status_log(str(id_),
                                          field='processingdate',
                                          value=processtime)

                    self._globaljoblog[globalid]['lastjobid'] = id_
                    self._globaljoblog[globalid]['laststatus'] = 'future'

                    self.add_job(globalid=globalid,
                                 origin=origin,
                                 file=file_,
                                 id_=id_,
                                 type=jobtype,
                                 counter=counter,
                                 status='future',
                                 waittime=waittime,
                                 processtime=processtime,
                                 redo=redo)

                    self._current_job_device.remove(origin)

                    continue

                if (laststatus is None or laststatus
                        == 'success') and waittime > 0 and processtime is None:
                    # set sleeptime for this job
                    logger.debug(
                        'Job {} on device {} - File/Job: {} - queued until its real start time (ID: {})'
                        .format(str(jobtype), str(origin), str(file_),
                                str(id_)))

                    self._log[str(id_)]['processingdate'] = datetime.timestamp(
                        datetime.now() + timedelta(minutes=waittime))

                    self._globaljoblog[globalid]['lastjobid'] = id_
                    self._globaljoblog[globalid]['laststatus'] = 'success'

                    self.add_job(globalid=globalid,
                                 origin=origin,
                                 file=file_,
                                 id_=id_,
                                 type=jobtype,
                                 counter=counter,
                                 status='future',
                                 waittime=waittime,
                                 processtime=processtime,
                                 redo=redo)

                    self._current_job_device.remove(origin)

                    continue

                if laststatus is not None and laststatus in ('pending', 'future', 'failure', 'interrupted',
                                                             'not connected') and lastjobid != id_ \
                        and processtime is None:
                    logger.debug(
                        'Job {} on device {} - File/Job: {} - queued because the previous job in the chain '
                        'has not been processed yet (ID: {})'.format(
                            str(jobtype), str(origin), str(file_), str(id_)))
                    # skipping because the previous job in the chain has not been processed yet
                    self.add_job(globalid=globalid,
                                 origin=origin,
                                 file=file_,
                                 id_=id_,
                                 type=jobtype,
                                 counter=counter,
                                 status='future',
                                 waittime=waittime,
                                 processtime=processtime,
                                 redo=redo)

                    self._current_job_device.remove(origin)

                    continue

                if processtime is not None and datetime.fromtimestamp(
                        processtime) > datetime.now():
                    time.sleep(1)
                    logger.debug(
                        'Job {} on device {} - File/Job: {} - queued because processtime is in the future (ID: {})'
                        .format(str(jobtype), str(origin), str(file_),
                                str(id_)))
                    self.add_job(globalid=globalid,
                                 origin=origin,
                                 file=file_,
                                 id_=id_,
                                 type=jobtype,
                                 counter=counter,
                                 status='future',
                                 waittime=waittime,
                                 processtime=processtime,
                                 redo=redo)

                    self._current_job_device.remove(origin)

                    continue

                if id_ in self._log:
                    self._current_job_id.append(id_)

                    if 'processingdate' in self._log[id_]:
                        self.write_status_log(str(id_),
                                              field='processingdate',
                                              delete=True)

                    logger.info(
                        "Job for {} (File/Job: {}) started (ID: {})".format(
                            str(origin), str(file_), str(id_)))
                    self.write_status_log(str(id_),
                                          field='status',
                                          value='processing')
                    self.write_status_log(str(id_),
                                          field='lastprocess',
                                          value=int(time.time()))

                    errorcount = 0

                    while jobstatus not in SUCCESS_STATES and errorcount < 3:

                        temp_comm = self._websocket.get_origin_communicator(
                            origin)

                        if temp_comm is None:
                            errorcount += 1
                            logger.error(
                                'Cannot start job {} on device {} - File/Job: {} - Device not connected (ID: {})'
                                .format(str(jobtype), str(origin), str(file_),
                                        str(id_)))
                            self._globaljoblog[globalid][
                                'laststatus'] = 'not connected'
                            self.write_status_log(str(id_),
                                                  field='laststatus',
                                                  value='not connected')
                            self._globaljoblog[globalid]['lastjobid'] = id_
                            jobstatus = jobReturn.NOCONNECT
                            time.sleep(5)

                        else:
                            # stop worker
                            self._websocket.set_job_activated(origin)
                            self.write_status_log(str(id_),
                                                  field='status',
                                                  value='starting')
                            try:
                                if self.start_job_type(item, jobtype,
                                                       temp_comm):
                                    logger.info(
                                        'Job {} executed successfully - Device {} - File/Job {} (ID: {})'
                                        .format(str(jobtype), str(origin),
                                                str(file_), str(id_)))
                                    if self._log[str(
                                            id_)]['status'] == 'not required':
                                        jobstatus = jobReturn.NOT_REQUIRED
                                    elif self._log[str(
                                            id_)]['status'] == 'not supported':
                                        jobstatus = jobReturn.NOT_SUPPORTED
                                    else:
                                        self.write_status_log(str(id_),
                                                              field='status',
                                                              value='success')
                                        jobstatus = jobReturn.SUCCESS
                                    self._globaljoblog[globalid][
                                        'laststatus'] = 'success'
                                    self._globaljoblog[globalid][
                                        'lastjobid'] = id_
                                else:
                                    logger.error(
                                        'Job {} could not be executed successfully - Device {} - File/Job {} (ID: {})'
                                        .format(str(jobtype), str(origin),
                                                str(file_), str(id_)))
                                    errorcount += 1
                                    self._globaljoblog[globalid][
                                        'laststatus'] = 'failure'
                                    self.write_status_log(str(id_),
                                                          field='laststatus',
                                                          value='failure')
                                    self._globaljoblog[globalid][
                                        'lastjobid'] = id_
                                    jobstatus = jobReturn.FAILURE

                                # start worker
                                self._websocket.set_job_deactivated(origin)

                            except:
                                logger.error(
                                    'Job {} could not be executed successfully (fatal error) '
                                    '- Device {} - File/Job {} (ID: {})'.
                                    format(str(jobtype), str(origin),
                                           str(file_), str(id_)))
                                errorcount += 1
                                self._globaljoblog[globalid][
                                    'laststatus'] = 'interrupted'
                                self.write_status_log(str(id_),
                                                      field='status',
                                                      value='interrupted')
                                self._globaljoblog[globalid]['lastjobid'] = id_
                                jobstatus = jobReturn.FAILURE

                    # check jobstatus and readd if possible
                    if jobstatus not in SUCCESS_STATES and (
                            jobstatus == jobReturn.NOCONNECT
                            and self._args.job_restart_notconnect == 0):
                        logger.error(
                            "Job for {} (File/Job: {} - Type {}) failed 3 times in row - aborting (ID: {})"
                            .format(str(origin), str(file_), str(jobtype),
                                    str(id_)))
                        self._globaljoblog[globalid]['laststatus'] = 'faulty'
                        self.write_status_log(str(id_),
                                              field='status',
                                              value='faulty')

                        if redo and self._globaljoblog[globalid].get(
                                'redoonerror', False):
                            logger.info(
                                'Re-adding this automatic job for {} (File/Job: {} - Type {}) (ID: {})'
                                .format(str(origin), str(file_), str(jobtype),
                                        str(id_)))
                            self.restart_job(id_=id_)
                            self._globaljoblog[globalid]['lastjobid'] = id_
                            self._globaljoblog[globalid][
                                'laststatus'] = 'success'

                    elif jobstatus in SUCCESS_STATES and redo:
                        logger.info(
                            'Re-adding this automatic job for {} (File/Job: {} - Type {}) (ID: {})'
                            .format(str(origin), str(file_), str(jobtype),
                                    str(id_)))
                        self.restart_job(id_=id_)

                    elif jobstatus == jobReturn.NOCONNECT and self._args.job_restart_notconnect > 0:
                        logger.error(
                            "Job for {} (File/Job: {} - Type {}) failed 3 times in row - requeued it (ID: {})"
                            .format(str(origin), str(file_), str(jobtype),
                                    str(id_)))
                        processtime = datetime.timestamp(
                            datetime.now() + timedelta(
                                minutes=self._args.job_restart_notconnect))
                        self.write_status_log(str(id_),
                                              field='processingdate',
                                              value=processtime)

                        self._globaljoblog[globalid]['lastjobid'] = id_
                        self._globaljoblog[globalid]['laststatus'] = 'future'

                        self.add_job(globalid=globalid,
                                     origin=origin,
                                     file=file_,
                                     id_=id_,
                                     type=jobtype,
                                     counter=counter,
                                     status='future',
                                     waittime=waittime,
                                     processtime=processtime,
                                     redo=redo)

                    self.send_webhook(id_=id_, status=jobstatus)

                    self._current_job_id.remove(id_)
                    self._current_job_device.remove(origin)
                    errorcount = 0
                    time.sleep(10)

            except KeyboardInterrupt as e:
                logger.info(
                    "process_update_queue-{} received keyboard interrupt, stopping"
                    .format(str(threadnumber)))
                break

            time.sleep(2)
        logger.info("Updater thread stopped")

    @logger.catch()
    def preadd_job(self, origin, job, id_, type, globalid=None):
        logger.info(
            'Adding Job {} for Device {} - File/Job: {} (ID: {})'.format(
                str(type), str(origin), str(job), str(id_)))

        globalid = globalid if globalid is not None else id_

        if globalid not in self._globaljoblog:
            self._globaljoblog[globalid] = {}

        self._globaljoblog[globalid]['laststatus'] = None
        self._globaljoblog[globalid]['lastjobend'] = None

        if jobType[type.split('.')[1]] == jobType.CHAIN:

            for subjob in self._commands[job]:
                logger.debug(subjob)
                self.add_job(globalid=globalid,
                             origin=origin,
                             file=subjob['SYNTAX'],
                             id_=int(time.time()),
                             type=subjob['TYPE'],
                             waittime=subjob.get('WAITTIME', 0),
                             redo=self._globaljoblog[globalid].get(
                                 'redo', False),
                             fieldname=subjob.get('FIELDNAME', 'unknown'),
                             jobname=job)
                time.sleep(1)
        else:
            self.add_job(globalid=globalid,
                         origin=origin,
                         file=job,
                         id_=int(id_),
                         type=type)

    @logger.catch()
    def add_job(self,
                globalid,
                origin,
                file,
                id_: int,
                type,
                counter=0,
                status='pending',
                waittime=0,
                processtime=None,
                redo=False,
                fieldname=None,
                jobname=None):
        if str(id_) not in self._log:
            log_entry = {
                'id': int(id_),
                'origin': origin,
                'jobname': jobname if jobname is not None else file,
                'file': file,
                'status': status,
                'fieldname': fieldname if fieldname is not None else "unknown",
                'counter': int(counter),
                'jobtype': str(type),
                'globalid': int(globalid),
                'waittime': waittime,
                'laststatus': 'init',
                'redo': redo,
                'auto': self._globaljoblog[globalid].get('autojob', False)
            }
            self.write_status_log(str(id_), field=log_entry)
        else:
            self.write_status_log(str(id_), field='status', value=status)
            self.write_status_log(str(id_), field='counter', value=counter)

        self._update_queue.put(str(id_))

    def write_status_log(self, id_, field=None, value=None, delete=False):
        self._update_mutex.acquire()
        try:
            if delete:
                if field is None:
                    del self._log[str(id_)]
                else:
                    if field in self._log[str(id_)]:
                        del self._log[str(id_)][field]
            else:
                if str(id_) not in self._log:
                    self._log[str(id_)] = {}
                if value is not None:
                    self._log[str(id_)][field] = value
                else:
                    self._log[str(id_)] = field
        finally:
            self._update_mutex.release()

        self.update_status_log()

    def update_status_log(self):
        with open('update_log.json', 'w') as outfile:
            json.dump(self._log, outfile, indent=4)

    @logger.catch()
    def delete_log_id(self, id_: str):
        if id_ not in self._current_job_id:
            self.write_status_log(str(id_), delete=True)
            return True
        return False

    def get_log(self, withautojobs=False):
        if withautojobs:
            return [
                self._log[x] for x in self._log
                if self._log[x].get('auto', False)
            ]
        return [
            self._log[x] for x in self._log
            if not self._log[x].get('auto', False)
        ]

    @logger.catch()
    def start_job_type(self, item, jobtype, ws_conn):
        try:
            jobtype = jobType[jobtype.split('.')[1]]
            if jobtype == jobType.INSTALLATION:
                file_ = self._log[str(item)]['file']
                returning = ws_conn.install_apk(300,
                                                filepath=os.path.join(
                                                    self._args.upload_path,
                                                    file_))
                return returning if 'RemoteGpsController'.lower() not in str(file_).lower() \
                    else True
            elif jobtype == jobType.SMART_UPDATE:
                package = self._log[str(item)]['file']
                version_job = "dumpsys package %s | grep versionName" % (
                    package, )
                architecture_job = ws_conn.passthrough(
                    'getprop ro.product.cpu.abi')
                package_ver_job = ws_conn.passthrough(version_job)
                try:
                    architecture = re.search(r'\[(\S+)\]',
                                             architecture_job).group(1)
                except:
                    logger.warning(
                        'Unable to determine the architecture of the device')
                    return False
                try:
                    package_ver = re.search(r'versionName=([0-9\.]+)',
                                            package_ver_job).group(1)
                except:
                    logger.warning('Unable to determine version for {}: {}',
                                   self._log[str(item)]['file'],
                                   package_ver_job)
                    return False
                mad_apk = apk_util.get_mad_apk(
                    self._db,
                    global_variables.MAD_APK_USAGE[self._log[str(item)]
                                                   ['file']],
                    architecture=architecture)
                if not mad_apk or mad_apk['file_id'] is None:
                    try:
                        arch = architecture
                    except:
                        arch = 'Unknown'
                    logger.warning('No MAD APK for {} [{}]', package, arch)
                    return False
                requires_update = apk_util.is_newer_version(
                    package_ver, mad_apk['version'])
                # Validate it is supported
                if package == 'com.nianticlabs.pokemongo':
                    if architecture == 'armeabi-v7a':
                        bits = '32'
                    else:
                        bits = '64'
                    try:
                        with open('configs/addresses.json') as fh:
                            address_object = json.load(fh)
                            composite_key = '%s_%s' % (
                                mad_apk['version'],
                                bits,
                            )
                            address_object[composite_key]
                    except KeyError:
                        try:
                            requests.get(global_variables.ADDRESSES_GITHUB
                                         ).json()[composite_key]
                        except KeyError:
                            logger.info(
                                'Unable to install APK since {} is not supported',
                                composite_key)
                            self.write_status_log(str(item),
                                                  field='status',
                                                  value='not supported')
                            return True
                    logger.debug('Supported PoGo version detected')
                if requires_update is None:
                    logger.info(
                        'Both versions are the same.  No update required')
                    self.write_status_log(str(item),
                                          field='status',
                                          value='not required')
                    return True
                elif requires_update is False:
                    logger.warning('MAD APK for {} is out of date', package)
                    return False
                else:
                    logger.info('Smart Update APK Installation for {} to {}',
                                package, self._log[str(item)]['origin'])
                    apk_file = bytes()
                    for chunk in apk_util.chunk_generator(
                            self._db, mad_apk['file_id']):
                        apk_file += chunk
                    returning = ws_conn.install_apk(300, data=apk_file)
                    return returning if 'RemoteGpsController'.lower() not in \
                        str(self._log[str(item)]['file']).lower() else True
            elif jobtype == jobType.REBOOT:
                return ws_conn.reboot()
            elif jobtype == jobType.RESTART:
                return ws_conn.restart_app("com.nianticlabs.pokemongo")
            elif jobtype == jobType.STOP:
                return ws_conn.stop_app("com.nianticlabs.pokemongo")
            elif jobtype == jobType.START:
                return ws_conn.start_app("com.nianticlabs.pokemongo")
            elif jobtype == jobType.PASSTHROUGH:
                command = self._log[str(item)]['file']
                returning = ws_conn.passthrough(command).replace(
                    '\r', '').replace('\n', '').replace('  ', '')
                self.write_status_log(str(item),
                                      field='returning',
                                      value=returning)
                self.set_returning(
                    origin=self._log[str(item)]['origin'],
                    fieldname=self._log[str(item)].get('fieldname'),
                    value=returning)
                return returning if 'KO' not in returning else False
            return False
        except Exception as e:
            logger.error(
                'Error while getting response from device - Reason: {}'.format(
                    str(e)))
        return False

    def delete_log(self, onlysuccess=False):
        if onlysuccess:
            for job in self._log.copy():
                if self._log[job]['status'] in [
                        'success', 'not required'
                ] and not self._log[job].get('redo', False):
                    self.write_status_log(str(job), delete=True)

        else:
            for job in self._log.copy():
                if not self._log[job].get('redo', False):
                    self.delete_log_id(job)

    def send_webhook(self, id_, status):
        if not self._log[str(id_)]['auto']:
            return

        try:
            if jobReturn(status).name not in self._args.job_dt_send_type.split(
                    '|') or not self._args.job_dt_wh:
                return

            from discord_webhook import DiscordWebhook, DiscordEmbed
            _webhook = DiscordWebhook(url=self._args.job_dt_wh_url)

            origin = self._log[str(id_)]['origin']
            file_ = self._log[str(id_)]['file']
            processtime = self._log[str(id_)].get('processingdate', None)
            returning = self._log[str(id_)].get('returning', '-')

            logger.info("Send discord status for device {} (Job: {})".format(
                str(origin), str(file_)))

            embed = DiscordEmbed(title='MAD Job Status',
                                 description='Automatic Job processed',
                                 color=242424)
            embed.set_author(name='MADBOT')
            embed.add_embed_field(name='Origin', value=origin)
            embed.add_embed_field(name='Jobname', value=file_)
            embed.add_embed_field(name='Returning', value=returning)
            embed.add_embed_field(name='Status', value=jobReturn(status).name)
            embed.add_embed_field(name='Next run',
                                  value=str(
                                      datetime.fromtimestamp(processtime)
                                      if processtime is not None else "-"))
            _webhook.add_embed(embed)
            _webhook.execute()
            embed = None
        except Exception as e:
            logger.error(
                'Cannot send discord webhook for origin {} - Job {} - Reason: {}'
                .format(str(origin), str(file_), str(e)))

    def load_automatic_jobs(self):
        self._globaljoblog = {}
        autocommandfile = os.path.join(self._args.file_path,
                                       'autocommands.json')
        if os.path.exists(autocommandfile):
            with open(autocommandfile) as cmdfile:
                autocommands = json.loads(cmdfile.read())

            logger.info('Found {} autojobs - adding them'.format(
                str(len(autocommands))))

            for autocommand in autocommands:
                origins = autocommand['origins'].split('|')
                for origin in origins:
                    redo = autocommand.get('redo', False)
                    algo = self.get_job_algo_value(
                        algotyp=autocommand.get('algotype', 'flex'),
                        algovalue=autocommand.get('algovalue', 0))
                    startwithinit = autocommand.get('startwithinit', False)

                    job = autocommand['job']

                    globalid = int(time.time())

                    self._globaljoblog[globalid] = {}
                    self._globaljoblog[globalid]['redo'] = redo
                    self._globaljoblog[globalid]['algo'] = algo
                    self._globaljoblog[globalid][
                        'algovalue'] = autocommand.get('algovalue', 0)
                    self._globaljoblog[globalid]['algotype'] = autocommand.get(
                        'algotype', 'flex')
                    self._globaljoblog[globalid][
                        'startwithinit'] = startwithinit
                    self._globaljoblog[globalid]['autojob'] = True
                    self._globaljoblog[globalid][
                        'redoonerror'] = autocommand.get('redoonerror', False)

                    self.preadd_job(origin=origin,
                                    job=job,
                                    id_=int(time.time()),
                                    type=str(jobType.CHAIN))
                    # get a unique id !
                    time.sleep(1)
        else:
            logger.info('Did not find any automatic jobs')

    def get_job_algo_value(self, algotyp, algovalue):
        if algotyp == "loop":
            # returning value as minutes
            return algovalue

        # calc diff in minutes to get exact starttime

        algotime = algovalue.split(':')
        tm = datetime.now().replace(hour=int(algotime[0]),
                                    minute=int(algotime[1]),
                                    second=0,
                                    microsecond=0)

        return (tm - datetime.now()).seconds / 60

    def set_returning(self, origin, fieldname, value):
        if origin not in self._returning:
            self._returning[origin] = {}

        if fieldname not in self._returning[origin]:
            self._returning[origin][fieldname] = ""

        self._returning[origin][fieldname] = str(value)
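
The class above is driven by a single shared queue: add_job pushes stringified job ids onto _update_queue, and each process_update_queue thread polls it with get_nowait(), tolerates queue.Empty, and requeues any job whose processing date still lies in the future. The following standalone sketch (hypothetical names, standard library only, not part of the class above) shows that polling-and-requeue pattern in isolation.

import threading
import time
from queue import Empty, Queue


def job_worker(jobs, stop_event):
    # Poll without blocking; defer jobs whose start time has not been reached yet.
    while not stop_event.is_set():
        try:
            job_id, run_at = jobs.get_nowait()
        except Empty:
            time.sleep(0.1)
            continue
        if run_at > time.time():
            # processing date is still in the future - put it back and move on
            jobs.put((job_id, run_at))
            time.sleep(0.1)
            continue
        print('processing job', job_id)


if __name__ == '__main__':
    jobs = Queue()
    stop = threading.Event()
    jobs.put((1, time.time()))          # due immediately
    jobs.put((2, time.time() + 0.5))    # deferred for half a second
    worker = threading.Thread(target=job_worker, args=(jobs, stop))
    worker.start()
    time.sleep(1)
    stop.set()
    worker.join()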
Ejemplo n.º 56
0
class SparkSpaceTests(unittest.TestCase):
    def setUp(self):
        self.context = Context()
        self.ears = Queue()
        self.fan = Queue()
        self.space = SparkSpace(context=self.context,
                                ears=self.ears,
                                fan=self.fan)

    def tearDown(self):
        del self.space
        del self.fan
        del self.ears
        del self.context
        collected = gc.collect()
        if collected:
            logging.info("Garbage collector: collected %d objects." %
                         (collected))

    def test_on_init(self):

        logging.info("*** on_init")

        self.assertEqual(self.space.context.get('space.token'), None)
        self.assertEqual(self.space.api, None)
        self.assertEqual(self.space._last_message_id, 0)

        space = SparkSpace(context=self.context, token='b')
        self.assertEqual(space.context.get('space.token'), 'b')

    def test_configure(self):

        logging.info("*** configure")

        settings = {  # from settings to member attributes
            'space': {
                'type': 'spark',
                'room': 'My preferred room',
                'participants':
                ['*****@*****.**', '*****@*****.**'],
                'token': 'hkNWEtMJNkODVGlZWU1NmYtyY',
            }
        }
        self.space.configure(settings=settings)
        self.assertEqual(self.space.context.get('space.room'),
                         'My preferred room')
        self.assertEqual(self.space.configured_title(), 'My preferred room')
        self.assertEqual(self.space.context.get('space.participants'),
                         ['*****@*****.**', '*****@*****.**'])
        self.assertEqual(self.space.context.get('space.token'),
                         'hkNWEtMJNkODVGlZWU1NmYtyY')

        self.space.context.clear()
        self.space.configure({
            'space': {
                'type': 'spark',
                'room': 'My preferred room',
                'participants': '*****@*****.**',
            }
        })
        self.assertEqual(self.space.context.get('space.room'),
                         'My preferred room')
        self.assertEqual(self.space.context.get('space.participants'),
                         ['*****@*****.**'])

        with self.assertRaises(KeyError):  # missing key
            self.space.context.clear()
            self.space.configure({
                'spark': {
                    'participants':
                    ['*****@*****.**', '*****@*****.**'],
                    'team':
                    'Anchor team',
                    'token':
                    'hkNWEtMJNkODk3ZDZLOGQ0OVGlZWU1NmYtyY',
                }
            })

    def test_connect(self):

        logging.info("*** connect")

        def my_factory(access_token):
            return FakeApi(access_token=access_token)

        self.space.context.set('space.token', None)
        with self.assertRaises(AssertionError):
            self.space.connect()

        self.space.context.set('space.token', 'a')
        self.space.connect(factory=my_factory)
        self.assertEqual(self.space.api.token, 'a')
        self.assertEqual(self.space.audit_api, None)

        self.space.context.set('space.token', 'a')
        self.space.context.set('space.audit_token', 'b')
        self.space.connect(factory=my_factory)
        self.assertEqual(self.space.api.token, 'a')
        self.assertEqual(self.space.audit_api.token, 'b')

    def test_on_connect(self):

        logging.info("*** on_connect")

        self.space.api = FakeApi(me=FakeBot())
        self.space.on_connect()
        self.assertTrue(self.space.api.people.me.called)
        self.assertEqual(self.context.get('bot.address'), '*****@*****.**')
        self.assertEqual(self.context.get('bot.name'), 'shelly')
        self.assertTrue(len(self.context.get('bot.id')) > 20)

    def test_list_group_channels(self):

        logging.info("*** list_group_channels")

        self.space.api = FakeApi()
        channels = self.space.list_group_channels()
        self.assertEqual(len(channels), 1)
        self.assertTrue(self.space.api.rooms.list.called)
        channel = channels[0]
        self.assertEqual(channel.id, '*id')
        self.assertEqual(channel.title, '*title')

    def test_create(self):

        logging.info("*** create")

        with self.assertRaises(AssertionError):
            self.space.create(title=None)

        with self.assertRaises(AssertionError):
            self.space.create(title='')

        with self.assertRaises(AssertionError):
            self.space.create(title='*title')

        self.space.api = FakeApi()
        channel = self.space.create(title='*title')
        self.assertTrue(self.space.api.rooms.create.called)
        self.assertEqual(channel.id, '*id')
        self.assertEqual(channel.title, '*title')

    def test_get_by_title(self):

        logging.info("*** get_by_title")

        with self.assertRaises(AssertionError):
            channel = self.space.get_by_title(None)

        with self.assertRaises(AssertionError):
            channel = self.space.get_by_title('')

        with self.assertRaises(AssertionError):
            channel = self.space.get_by_title('*no*api*anyway')

        self.space.api = FakeApi()
        channel = self.space.get_by_title('*does*not*exist')
        self.assertEqual(channel, None)
        self.assertTrue(self.space.api.rooms.list.called)

        channel = self.space.get_by_title('*title')
        self.assertEqual(
            channel,
            Channel({
                "id": "*id",
                "is_direct": False,
                "is_group": True,
                "is_moderated": True,
                "is_team": False,
                "team_id": "*team",
                "title": "*title",
                "type": "group",
            }))

        class Intruder(object):
            def list(self, **kwargs):
                raise Exception('TEST')

        self.space.api.rooms = Intruder()
        channel = self.space.get_by_title('*title')
        self.assertEqual(channel, None)

    def test_get_by_id(self):

        logging.info("*** get_by_id")

        with self.assertRaises(AssertionError):
            channel = self.space.get_by_id(None)

        with self.assertRaises(AssertionError):
            channel = self.space.get_by_id('')

        with self.assertRaises(AssertionError):
            channel = self.space.get_by_id('*no*api*anyway')

        self.space.api = FakeApi()

        channel = self.space.get_by_id('*id')
        self.assertEqual(
            channel,
            Channel({
                "id": "*id",
                "is_direct": False,
                "is_group": True,
                "is_moderated": True,
                "is_team": False,
                "team_id": "*team",
                "title": "*title",
                "type": "group",
            }))

        class Intruder(object):
            def get(self, label, **kwargs):
                raise Exception('TEST')

        self.space.api.rooms = Intruder()
        channel = self.space.get_by_id('*id')
        self.assertEqual(channel, None)

    def test_get_by_person(self):

        logging.info("*** get_by_person")

        with self.assertRaises(AssertionError):
            channel = self.space.get_by_person(None)

        with self.assertRaises(AssertionError):
            channel = self.space.get_by_person('')

        with self.assertRaises(AssertionError):
            channel = self.space.get_by_person('*no*api*anyway')

        self.space.api = FakeApi(room=FakeDirectRoom())

        channel = self.space.get_by_person('Marcel Jones')
        self.assertEqual(
            channel,
            Channel({
                "id": "*direct_id",
                "is_direct": True,
                "is_group": False,
                "is_moderated": False,
                "is_team": False,
                "team_id": None,
                "title": "Marcel Jones",
                "type": "direct",
            }))

        class Intruder(object):
            def list(self, **kwargs):
                raise Exception('TEST')

        self.space.api.rooms = Intruder()
        channel = self.space.get_by_person('Marcel Jones')
        self.assertEqual(channel, None)

    def test_update(self):

        logging.info("*** update")

        self.space.api = FakeApi()
        self.space.update(channel=FakeChannel())

    def test_delete(self):

        logging.info("*** delete")

        # explicit id, room exists
        self.space.api = FakeApi()
        self.space.delete(id='*id')
        self.assertTrue(self.space.api.rooms.delete.called)

        # explicit id, room does not exists
        self.space.api = FakeApi()
        self.space.delete(id='*ghost*room')
        self.assertTrue(self.space.api.rooms.delete.called)

    def test_get_team(self):

        logging.info("*** get_team")

        class Team(object):
            name = '*name'
            id = '456'

        self.space.api = FakeApi(teams=[Team()])
        team = self.space.get_team(name='*name')
        self.assertTrue(self.space.api.teams.list.called)
        self.assertEqual(team.name, '*name')
        self.assertEqual(team.id, '456')

        self.space.api = FakeApi(teams=[Team()])
        team = self.space.get_team(name='*unknown')
        self.assertTrue(self.space.api.teams.list.called)
        self.assertEqual(team, None)

    def test_list_participants(self):

        logging.info("*** list_participants")

        self.space.api = FakeApi()
        self.space.list_participants(id='*id')
        self.assertTrue(self.space.api.memberships.list.called)

    def test_add_participants(self):

        logging.info("*** add_participants")

        with mock.patch.object(self.space, 'add_participant') as mocked:

            self.space.add_participants(id='*id', persons=['*****@*****.**'])
            mocked.assert_called_with(id='*id', person='*****@*****.**')

    def test_add_participant(self):

        logging.info("*** add_participant")

        self.space.api = FakeApi()
        self.space.add_participant(id='*id', person='*****@*****.**')
        self.assertTrue(self.space.api.memberships.create.called)

    def test_remove_participants(self):

        logging.info("*** remove_participants")

        with mock.patch.object(self.space, 'remove_participant') as mocked:

            self.space.remove_participants(id='*id',
                                           persons=['*****@*****.**'])
            mocked.assert_called_with(id='*id', person='*****@*****.**')

    def test_remove_participant(self):

        logging.info("*** remove_participant")

        self.space.api = FakeApi()
        self.space.remove_participant(id='*id', person='*****@*****.**')
        self.assertTrue(self.space.api.memberships.delete.called)

    def test_post_message(self):

        logging.info("*** post_message")

        self.space.api = FakeApi()
        self.space.post_message(id='*id', text='hello world')
        self.assertTrue(self.space.api.messages.create.called)

        self.space.api = FakeApi()
        self.space.post_message(person='*****@*****.**', text='hello world')
        self.assertTrue(self.space.api.messages.create.called)

        self.space.api = FakeApi()
        self.space.post_message(id='*id', content='hello world')
        self.assertTrue(self.space.api.messages.create.called)

        self.space.api = FakeApi()
        self.space.post_message(person='*****@*****.**', content='hello world')
        self.assertTrue(self.space.api.messages.create.called)

        self.space.api = FakeApi()
        with self.assertRaises(AssertionError):
            self.space.post_message(text='hello world',
                                    content='hello world',
                                    file='./test_messages/sample.png')

        self.space.api = FakeApi()
        with self.assertRaises(AssertionError):
            self.space.post_message(id='*id',
                                    person='*****@*****.**',
                                    text='hello world',
                                    content='hello world',
                                    file='./test_messages/sample.png')

        self.space.api = FakeApi()
        self.space.post_message(id='*id',
                                text='hello world',
                                content='hello world',
                                file='./test_messages/sample.png')
        self.assertTrue(self.space.api.messages.create.called)

        self.space.api = FakeApi()
        self.space.post_message(person='*****@*****.**',
                                text='hello world',
                                content='hello world',
                                file='./test_messages/sample.png')
        self.assertTrue(self.space.api.messages.create.called)

    def test_register(self):

        logging.info("*** register")

        self.context.set('bot.id', '*id')
        self.context.set('spark.token', '*token')

        self.space.api = FakeApi()
        self.space.register('*hook')
        self.assertTrue(self.space.api.webhooks.create.called)
        self.assertFalse(self.context.get('audit.has_been_armed'))

        self.space.api = FakeApi()
        self.space.audit_api = FakeApi()
        self.space.register('*hook')
        self.assertTrue(self.space.api.webhooks.create.called)
        self.assertTrue(self.context.get('audit.has_been_armed'))

    def test_deregister(self):

        logging.info("*** deregister")

        self.space.api = FakeApi()
        self.context.set('bot.id', '*id')
        self.space.deregister()
        self.assertTrue(self.space.api.webhooks.list.called)

    def test_run(self):

        logging.info("*** run")

        self.space.api = FakeApi()

        self.space.PULL_INTERVAL = 0.001
        mocked = mock.Mock(return_value=[])
        self.space.pull = mocked

        p = self.space.start()
        p.join(0.01)
        if p.is_alive():
            logging.info('Stopping puller')
            self.context.set('general.switch', 'off')
            p.join()

        self.assertFalse(p.is_alive())

    def test_webhook(self):

        logging.info("*** webhook")

        fake_message = {
            u'status': u'active',
            u'resource': u'messages',
            u'name': u'shellbot-messages',
            u'created': u'2017-07-30T20:14:24.050Z',
            u'appId': u'Y2lzY29zcGFyazovL3VzLmM3ZDUxNWNiNGEwY2M5MWFh',
            u'id': u'Y2lzY29zcGFyazovL3VzjI0MTM2ZjgwY2Yy',
            u'orgId': u'Y2lzY29zcGFyazovL3VYjU1ZS00ODYzY2NmNzIzZDU',
            u'createdBy': u'Y2lzY29zcGFyazovL3VzLS01ZGI5M2Y5MjI5MWM',
            u'targetUrl': u'http://0dab1.ngrok.io/hook',
            u'ownedBy': u'creator',
            u'actorId': u'Y2lzY29zcGFyazovL3VzL1BFkyMzU',
            u'data': {
                u'roomType': u'group',
                u'created': u'2017-07-30T20:14:50.882Z',
                u'personId': u'Y2lzY29zcGFyayYi1mYWYwZWQwMjkyMzU',
                u'personEmail': u'*****@*****.**',
                u'mentionedPeople': [u'Y2lzY29zcGFyazovL3VGI5M2Y5MjI5MWM'],
                u'roomId': u'Y2lzY29zcGFyazovL3VzL1NzUtYzc2ZDMyOGY0Y2Rj',
                u'id': '*123',
            },
            u'event': u'created',
        }
        self.space.api = FakeApi()
        self.assertEqual(self.space.webhook(fake_message), 'OK')
        self.assertTrue(self.space.api.messages.get.called)
        data = self.space.ears.get()
        self.assertEqual(
            yaml.safe_load(data), {
                'text': '*message',
                'content': '*message',
                'from_id': None,
                'from_label': None,
                'hook': 'shellbot-messages',
                'stamp': '2017-07-19T05:29:23.962Z',
                'created': '2017-07-19T05:29:23.962Z',
                'channel_id': None,
                'type': 'message',
                'is_direct': False,
                'mentioned_ids': []
            })

        with self.assertRaises(Exception):
            print(self.space.ears.get_nowait())
        with self.assertRaises(Exception):
            print(self.space.fan.get_nowait())

        fake_message = {
            u'status': u'active',
            u'resource': u'messages',
            u'name': u'shellbot-audit',
            u'created': u'2017-07-30T20:25:29.924Z',
            u'appId': u'Y2lzY29zcGFyazovL3VzL0FQUE2YyNjZhYmY2NmM5OTllYzFm',
            u'id': u'Y2lzY29zcGFyazovL3VzL1dFC00NzllLTg0MDQtZGQ2NGJiNTk3Nzdi',
            u'orgId': u'Y2lzY29zcGFyazovL3VzL09SR0FOSVpBVY2NmNzIzZDU',
            u'createdBy': u'Y2lzY29zcGFyazovL3VzL1BFTTIyYi1mYWYwZWQwMjkyMzU',
            u'targetUrl': u'http://0dab1.ngrok.io/hook',
            u'ownedBy': u'creator',
            u'actorId': u'Y2lzY29zcGFyazovL3VzLM2Y5MjI5MWM',
            u'data': {
                u'files':
                [u'http://hydra-a5.wbx2.com/contents/Y2lzY29zcGFWY5LzA'],
                u'roomType': u'group',
                u'created': u'2017-07-30T20:25:33.803Z',
                u'personId': u'Y2lzY29zcGFyazovL3VzL1BFT5M2Y5MjI5MWM',
                u'personEmail': u'*****@*****.**',
                u'roomId': u'Y2lzY29zcGFyazovL3VzL1JPTyNmFhNWYxYTY4',
                u'id': u'*123',
            },
            u'event': u'created',
        }

        self.space.audit_api = FakeApi()
        self.assertEqual(self.space.webhook(fake_message), 'OK')
        self.assertTrue(self.space.audit_api.messages.get.called)
        data = self.space.fan.get()
        self.assertEqual(
            yaml.safe_load(data), {
                'text': '*message',
                'content': '*message',
                'from_id': None,
                'from_label': None,
                'hook': 'shellbot-audit',
                'stamp': '2017-07-19T05:29:23.962Z',
                'created': '2017-07-19T05:29:23.962Z',
                'channel_id': None,
                'type': 'message',
                'is_direct': False,
                'mentioned_ids': []
            })

        with self.assertRaises(Exception):
            print(self.space.ears.get_nowait())
        with self.assertRaises(Exception):
            print(self.space.fan.get_nowait())

    def test_pull(self):

        logging.info("*** pull")

        self.space.api = FakeApi(messages=[FakeMessage()])

        self.assertEqual(self.space._last_message_id, 0)
        self.space.pull()
        self.assertEqual(self.context.get('puller.counter'), 1)
        self.assertTrue(self.space.api.messages.list.called)
        self.assertEqual(self.space._last_message_id, '*123')

        self.space.pull()
        self.assertEqual(self.context.get('puller.counter'), 2)
        self.assertEqual(self.space._last_message_id, '*123')

        self.space.pull()
        self.assertEqual(self.context.get('puller.counter'), 3)
        self.assertEqual(self.space._last_message_id, '*123')

        self.assertEqual(
            yaml.safe_load(self.ears.get()), {
                'text': '*message',
                'content': '*message',
                'from_id': None,
                'from_label': None,
                'hook': 'pull',
                'stamp': '2017-07-19T05:29:23.962Z',
                'created': '2017-07-19T05:29:23.962Z',
                'channel_id': None,
                'type': 'message',
                'is_direct': False,
                'mentioned_ids': []
            })
        with self.assertRaises(Exception):
            print(self.ears.get_nowait())

    def test_on_message(self):

        logging.info("*** on_message")

        class MySpace(SparkSpace):
            def name_attachment(self, url, token=None):
                return 'some_file.pdf'

            def get_attachment(self, url, token=None):
                return b'hello world'

        self.space = MySpace(context=self.context)

        self.space.on_message(my_message, self.ears)
        message = my_message.copy()
        message.update({"type": "message"})
        message.update({"content": message['text']})
        message.update({"attachment": "some_file.pdf"})
        message.update({"url": "http://www.example.com/images/media.png"})
        message.update({"from_id": '*matt*id'})
        message.update({"from_label": '*****@*****.**'})
        message.update({'is_direct': False})
        message.update({"mentioned_ids": ['*matt*id', '*julie*id']})
        message.update({"channel_id": '*id1'})
        message.update({"stamp": '2015-10-18T14:26:16+00:00'})
        self.maxDiff = None
        self.assertEqual(yaml.safe_load(self.ears.get()), message)

        self.space.on_message(my_private_message, self.ears)
        message = my_private_message.copy()
        message.update({"type": "message"})
        message.update({"content": message['text']})
        message.update({"from_id": '*foo*id'})
        message.update({"from_label": '*****@*****.**'})
        message.update({'is_direct': True})
        message.update({"mentioned_ids": []})
        message.update({"channel_id": '*direct*id'})
        message.update({"stamp": '2017-07-22T16:49:22.008Z'})
        self.maxDiff = None
        self.assertEqual(yaml.safe_load(self.ears.get()), message)

        with self.assertRaises(Exception):
            print(self.ears.get_nowait())

    def test_download_attachment(self):

        logging.info("*** download_attachment")

        class MySpace(SparkSpace):
            def name_attachment(self, url, token=None):
                return 'some_file.pdf'

            def get_attachment(self, url, token=None):
                return BytesIO(b'hello world')

        space = MySpace(context=self.context)
        outcome = space.download_attachment(url='/dummy')

        with open(outcome, "rb") as handle:
            self.assertEqual(handle.read(),
                             space.get_attachment('/dummy').read())

        try:
            os.remove(outcome)
        except:
            pass

    def test_name_attachment(self):

        logging.info("*** name_attachment")

        class MyResponse(object):
            def __init__(self, status_code=200, headers={}):
                self.status_code = status_code
                self.headers = headers

        self.space.token = None
        response = MyResponse(headers={'Content-Disposition': 'who cares'})
        self.assertEqual(
            self.space.name_attachment(url='/dummy', response=response),
            'downloadable')

        self.space.token = '*void'
        response = MyResponse(
            headers={'Content-Disposition': 'filename="some_file.pdf"'})
        self.assertEqual(
            self.space.name_attachment(url='/dummy', response=response),
            'some_file.pdf')

        self.space.token = None
        response = MyResponse(
            status_code=400,
            headers={'Content-Disposition': 'filename="some_file.pdf"'})
        with self.assertRaises(Exception):
            name = self.space.name_attachment(url='/dummy', response=response)

    def test_get_attachment(self):

        logging.info("*** get_attachment")

        class MyResponse(object):
            def __init__(self, status_code=200, headers={}):
                self.status_code = status_code
                self.headers = headers
                self.encoding = 'encoding'
                self.content = b'content'

        self.space.token = None
        response = MyResponse(headers={})
        content = self.space.get_attachment(url='/dummy',
                                            response=response).getvalue()
        self.assertEqual(content, b'content')

        self.space.token = '*void'
        response = MyResponse(headers={})
        content = self.space.get_attachment(url='/dummy',
                                            response=response).getvalue()
        self.assertEqual(content, b'content')

        self.space.token = None
        response = MyResponse(status_code=400, headers={})
        with self.assertRaises(Exception):
            content = self.space.get_attachment(url='/dummy',
                                                response=response)

    def test_on_join(self):

        logging.info("*** on_join")

        self.space.on_join(my_join, self.ears)
        item = my_join.copy()
        item.update({"type": "join"})
        item.update(
            {"actor_id": 'Y2lzY29zcGFyazovL3VRiMTAtODZkYy02YzU0Yjg5ODA5N2U'})
        item.update({"actor_address": '*****@*****.**'})
        item.update({"actor_label": 'Foo Bar'})
        item.update({
            "channel_id":
            'Y2lzY29zcGFyazovL3VzL1JP3LTk5MDAtMDU5MDI2YjBiNDUz'
        })
        item.update({"stamp": '2017-05-31T21:25:30.424Z'})
        self.maxDiff = None
        self.assertEqual(yaml.safe_load(self.ears.get()), item)

    def test_on_leave(self):

        logging.info("*** on_leave")

        self.space.on_leave(my_leave, self.ears)
        item = my_leave.copy()
        item.update({"type": "leave"})
        item.update(
            {"actor_id": 'Y2lzY29zcGFyazovL3VRiMTAtODZkYy02YzU0Yjg5ODA5N2U'})
        item.update({"actor_address": '*****@*****.**'})
        item.update({"actor_label": 'Foo Bar'})
        item.update({
            "channel_id":
            'Y2lzY29zcGFyazovL3VzL1JP3LTk5MDAtMDU5MDI2YjBiNDUz'
        })
        item.update({"stamp": '2017-05-31T21:25:30.424Z'})
        self.maxDiff = None
        self.assertEqual(yaml.safe_load(self.ears.get()), item)

    def test__to_channel(self):

        logging.info("*** _to_channel")

        channel = self.space._to_channel(FakeRoom())
        self.assertEqual(channel.id, '*id')
        self.assertEqual(channel.title, '*title')
        self.assertFalse(channel.is_direct)
        self.assertTrue(channel.is_group)
        self.assertFalse(channel.is_team)
        self.assertTrue(channel.is_moderated)

        channel = self.space._to_channel(FakeDirectRoom())
        self.assertEqual(channel.id, '*direct_id')
        self.assertEqual(channel.title, 'Marcel Jones')
        self.assertTrue(channel.is_direct)
        self.assertFalse(channel.is_group)
        self.assertFalse(channel.is_team)
        self.assertFalse(channel.is_moderated)

        channel = self.space._to_channel(FakeTeamRoom())
        self.assertEqual(channel.id, '*team_id')
        self.assertEqual(channel.title, '*team_title')
        self.assertFalse(channel.is_direct)
        self.assertTrue(channel.is_group)
        self.assertTrue(channel.is_team)
        self.assertFalse(channel.is_moderated)
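
Several of the tests above (e.g. test_webhook, test_pull and test_on_message) end by asserting that the ears and fan queues have been drained, wrapping get_nowait() in assertRaises(Exception). Below is a minimal standalone sketch of that pattern, assuming only the standard library; queue.Empty is the concrete exception raised by an exhausted queue.

import unittest
from multiprocessing import Queue
from queue import Empty


class QueueDrainTest(unittest.TestCase):
    def test_drained_queue_raises_empty(self):
        q = Queue()
        q.put('only item')
        # consume the single element, then the queue must report Empty
        self.assertEqual(q.get(), 'only item')
        with self.assertRaises(Empty):
            q.get_nowait()


if __name__ == '__main__':
    unittest.main()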
Ejemplo n.º 57
0
class Controller(object):
    """
    Fuzzinator's main controller that orchestrates a fuzz session by scheduling
    all related activities (e.g., keeps SUTs up-to-date, runs fuzzers and feeds
    test cases to SUTs, or minimizes failure-inducing test cases). All
    configuration options of the framework must be encapsulated in a
    :class:`configparser.ConfigParser` object.
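
    For illustration only, a parser along these lines would satisfy that contract;
    the SUT name ``sut.example`` and the callable ``mypackage.my_sut_call`` are
    placeholders, not part of Fuzzinator's API::

        import configparser

        config = configparser.ConfigParser()
        config.read_dict({
            'fuzzinator': {'db_uri': 'mongodb://localhost/fuzzinator'},
            'sut.example': {'call': 'mypackage.my_sut_call'},
        })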

    The following config sections and options are recognized:

      - Section ``fuzzinator``: Global settings of the framework.

        - Option ``work_dir``: Pattern of work directory for temporary files,
          which may contain the substring ``{uid}`` as a placeholder for a
          unique string (replaced by the framework). (Optional, default:
          ``~/.fuzzinator-{uid}``)

        - Option ``db_uri``: URI to a MongoDB database to store found issues and
          execution statistics. (Optional, default:
          ``mongodb://localhost/fuzzinator``)

        - Option ``cost_budget``: (Optional, default: number of cpus)

        - Option ``validate_after_update``: Boolean to enable the validation
          of valid issues of all SUTs after their update.
          (Optional, default: ``False``)

      - Sections ``sut.NAME``: Definitions of a SUT named *NAME*

        - Option ``call``: Fully qualified name of a python callable that must
          accept a ``test`` keyword argument representing the input to the SUT
          and must return a dictionary object if the input triggered an issue
          in the SUT, or a value considered false otherwise (which can be a
          simple ``None``, but can also be a ``NonIssue`` in complex cases).
          The returned issue dictionary (if any) *should* contain an ``'id'``
          field whose value is equal for issues that are not considered unique.
          (Mandatory)

          See package :mod:`fuzzinator.call` for potential callables.

        - Option ``cost``: (Optional, default: 1)

        - Option ``reduce``: Fully qualified name of a python callable that must
          accept ``issue``, ``sut_call``, ``sut_call_kwargs``, ``listener``,
          ``ident``, ``work_dir`` keyword arguments representing an issue to be
          reduced (and various other potentially needed objects), and must
          return a tuple consisting of a reduced test case for the issue (or
          ``None`` if the issue's current test case could not be reduced) and a
          (potentially empty) list of new issues that were discovered during
          test case reduction (if any). (Optional, no reduction for this SUT if
          option is missing.)

          See package :mod:`fuzzinator.reduce` for potential callables.

        - Option ``reduce_call``: Fully qualified name of a python callable that
          acts as the SUT's ``call`` option during test case reduction.
          (Optional, default: the value of option ``call``)

          See package :mod:`fuzzinator.call` for potential callables.

        - Option ``reduce_cost``: (Optional, default: the value of option
          ``cost``)

        - Option ``validate_call``: Fully qualified name of a python callable
          that acts as the SUT's ``call`` option during test case validation.
          (Optional, default: the value of option ``reduce_call`` if defined,
          otherwise the value of option ``call``)

          See package :mod:`fuzzinator.call` for potential callables.

        - Option ``validate_cost``: (Optional, default: the value of option
          ``cost``)

        - Option ``update_condition``: Fully qualified name of a python callable
          that must return ``True`` if and only if the SUT should be updated.
          (Optional, SUT is never updated automatically if option is missing.)

          See package :mod:`fuzzinator.update` for potential callables.

        - Option ``update``: Fully qualified name of a python callable that
          should perform the update of the SUT. (Optional, SUT is never updated
          if option is missing.)

          See package :mod:`fuzzinator.update` for potential callables.

        - Option ``update_cost``: (Optional, default: the value of option
          ``fuzzinator:cost_budget``)

        - Option ``validate_after_update``: Boolean to enable the validation
          of the valid issues of the SUT after its update. (Optional, default:
          the value of option ``fuzzinator:validate_after_update``)

        - Option ``formatter``: Fully qualified name of a python callable that
          formats the issue dictionary of the SUT and returns a custom string
          representation. It must accept ``issue`` and ``format`` keyword
          arguments representing an issue to be formatted and a formatting
          instruction. If ``format`` is ``'long'`` or not specified, the issue
          should be formatted in full, while if ``'short'`` is given, a
          summary description (preferably a single line of text) should be
          returned.
          (Optional, default: :func:`fuzzinator.formatter.JsonFormatter`.)

          See package :mod:`fuzzinator.formatter` for further potential
          callables.

        - Option ``tui_formatter``: Fully qualified name of a python
          callable that formats the issue dictionary of the SUT to display
          it in the TUI issue viewer interface.
          (Optional, default: the value of option ``formatter``)

          See package :mod:`fuzzinator.formatter` for further potential
          callables.

        - Option ``email_formatter``: Fully qualified name of a python
          callable that formats the issue dictionary of the SUT to insert
          it into an e-mail notification.
          (Optional, default: the value of option ``formatter``)

          See package :mod:`fuzzinator.formatter` for further potential
          callables.

      - Sections ``fuzz.NAME``: Definitions of a fuzz job named *NAME*

        - Option ``sut``: Name of the SUT that describes the subject of
          this fuzz job. (Mandatory)

        - Option ``fuzzer``: Fully qualified name of a python callable that must
          accept an ``index`` keyword argument representing a running counter
          in the fuzz job and must return a test input (or ``None``, which
          signals that the fuzzer is "exhausted" and cannot generate more test
          cases in this fuzz job). The semantics of the generated test input is
          not restricted by the framework, it is up to the configuration to
          ensure that the SUT of the fuzz job can deal with the tests generated
          by the fuzzer of the fuzz job. (Mandatory)

          See package :mod:`fuzzinator.fuzzer` for potential callables.

        - Option ``batch``: Number of times the fuzzer is requested to generate
          a new test for the SUT. (Optional, default: 1)

        - Option ``instances``: Number of instances of this fuzz job allowed to
          run in parallel. (Optional, default: ``inf``)

        - Option ``refresh``: Statistics update frequency in terms of executed
          test cases. (Optional, default: ``batch`` size)

      - Section ``listeners``: Definitions of custom event listeners.
        This section is optional.

        - Options ``OPT``: Fully qualified name of a python class that
          executes custom actions for selected events.

        See package :mod:`fuzzinator.listeners` for potential listeners.

      - Callable options can be implemented as functions or classes with
        ``__call__`` method (the latter are instantiated first to get a callable
        object). Both constructor calls (if any) and the "real" calls can be
        given keyword arguments. These arguments have to be specified in
        sections ``(sut|fuzz).NAME.OPT[.init]`` with appropriate names (where
        the ``.init`` sections stand for the constructor arguments).

      - All callables can be decorated according to python semantics. The
        decorators must be callable classes themselves and have to be specified
        in options ``OPT.decorate(N)`` with fully qualified name. Multiple
        decorators can be applied to a callable ``OPT``, their order is
        specified by an integer index in parentheses. Keyword arguments to be
        passed to the decorators have to be listed in sections
        ``(sut|fuzz).NAME.OPT.decorate(N)``.

        See packages :mod:`fuzzinator.call` and :mod:`fuzzinator.fuzzer` for
        potential decorators.
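
      - For illustration, a minimal configuration sketch is given below. The
        names ``mysut``, ``myfuzz``, ``mypackage.MySutCall``,
        ``mypackage.my_random_fuzzer``, and the ``command`` init argument are
        hypothetical placeholders, not names provided by the framework::

            [fuzzinator]
            cost_budget=4

            [sut.mysut]
            call=mypackage.MySutCall

            [sut.mysut.call.init]
            command=./run-my-sut

            [fuzz.myfuzz]
            sut=mysut
            fuzzer=mypackage.my_random_fuzzer
            batch=100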
    """
    def __init__(self, config):
        """
        :param configparser.ConfigParser config: the configuration options of the
            fuzz session.

        :ivar fuzzinator.ListenerManager listener: a listener manager object that is
            called on various events during the fuzz session.
        """
        self.config = config

        self.capacity = int(
            config_get_with_writeback(self.config, 'fuzzinator', 'cost_budget',
                                      str(os.cpu_count())))
        self.work_dir = config_get_with_writeback(
            self.config, 'fuzzinator', 'work_dir',
            os.path.join(os.getcwd(),
                         '.fuzzinator-{uid}')).format(uid=os.getpid())
        self.config.set('fuzzinator', 'work_dir', self.work_dir)
        self.fuzzers = config_get_fuzzers(self.config)
        self.validate_after_update = config_get_with_writeback(
            self.config, 'fuzzinator', 'validate_after_update',
            fallback=False) in [1, '1', True, 'True', 'true']

        self.db = MongoDriver(
            config_get_with_writeback(self.config, 'fuzzinator', 'db_uri',
                                      'mongodb://localhost/fuzzinator'))
        self.db.init_db(self.fuzzers)
        self.session_start = time.time()
        self.session_baseline = self.db.get_stats()

        self.listener = ListenerManager()
        for name in config_get_kwargs(self.config, 'listeners'):
            entity = import_entity(self.config.get('listeners', name))
            self.listener += entity(config=config,
                                    **config_get_kwargs(
                                        config, 'listeners.' + name + '.init'))

        self._shared_queue = Queue()
        self._shared_lock = Lock()

    def run(self, *, max_cycles=None):
        """
        Start the fuzz session.

        :param int max_cycles: maximum number to iterate through the fuzz jobs
            defined in the configuration (defaults to ``inf``).
        """
        max_cycles = max_cycles if max_cycles is not None else float('inf')
        cycle = 0
        fuzz_idx = 0
        fuzz_names = list(self.fuzzers)
        load = 0
        job_id = 0
        job_queue = []
        running_jobs = dict()

        def _update_load():
            current_load = 0
            for ident in list(running_jobs):
                if not running_jobs[ident]['proc'].is_alive(
                ) or not psutil.pid_exists(running_jobs[ident]['proc'].pid):
                    self.listener.remove_job(ident=ident)
                    del running_jobs[ident]
                else:
                    current_load += running_jobs[ident]['job'].cost

            nonlocal load
            if load != current_load:
                load = current_load
                self.listener.update_load(load=load)

        def _poll_jobs():
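            # The add_*_job() and cancel_job() methods communicate with this
            # loop through self._shared_queue: each entry is a
            # (job_class, job_kwargs, priority) tuple, and a job_class of None
            # marks a cancellation request for the job named in job_kwargs.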
            with self._shared_lock:
                while not self._shared_queue.empty():
                    job_class, job_kwargs, priority = self._shared_queue.get_nowait(
                    )
                    if job_class is not None:
                        _add_job(job_class, job_kwargs, priority)
                    else:
                        _cancel_job(**job_kwargs)

        def _add_job(job_class, job_kwargs, priority):
            nonlocal job_id
            next_job = job_class(id=job_id,
                                 config=self.config,
                                 db=self.db,
                                 listener=self.listener,
                                 **job_kwargs)
            job_id += 1

            if priority:
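                # Zero cost, combined with the front-of-queue insertion below,
                # lets a priority job be scheduled regardless of current load.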
                next_job.cost = 0

            {
                FuzzJob:
                lambda: self.listener.new_fuzz_job(ident=next_job.id,
                                                   cost=next_job.cost,
                                                   sut=next_job.sut_name,
                                                   fuzzer=next_job.fuzzer_name,
                                                   batch=next_job.batch),
                ValidateJob:
                lambda: self.listener.new_validate_job(ident=next_job.id,
                                                       cost=next_job.cost,
                                                       sut=next_job.sut_name,
                                                       issue_id=next_job.issue[
                                                           'id']),
                ReduceJob:
                lambda: self.listener.new_reduce_job(
                    ident=next_job.id,
                    cost=next_job.cost,
                    sut=next_job.sut_name,
                    issue_id=next_job.issue['id'],
                    size=len(str(next_job.issue['test']))),
                UpdateJob:
                lambda: self.listener.new_update_job(ident=next_job.id,
                                                     cost=next_job.cost,
                                                     sut=next_job.sut_name),
            }[job_class]()

            job_queue.insert(0 if priority else len(job_queue), next_job)

        def _cancel_job(ident):
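            # A running job is cancelled by killing its process tree; a job
            # still waiting in the queue is simply removed from it.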
            if ident in running_jobs:
                Controller.kill_process_tree(running_jobs[ident]['proc'].pid)
            else:
                ident_idx = [
                    job_idx for job_idx, job in enumerate(job_queue)
                    if job.id == ident
                ]
                if ident_idx:
                    self.listener.remove_job(ident=ident)
                    del job_queue[ident_idx[0]]

        try:
            while True:
                # Update load and poll added jobs (if any).
                _poll_jobs()
                _update_load()

                if fuzz_idx == 0:
                    cycle += 1
                if cycle > max_cycles or (not self.fuzzers
                                          and max_cycles != float('inf')):
                    while load > 0:
                        time.sleep(1)
                        # Only to let running jobs be cancelled; newly added
                        # jobs don't get scheduled.
                        _poll_jobs()
                        _update_load()
                    break

                # Hunt for new issues only if there is no other work to do.
                if not job_queue:
                    if not self.fuzzers:
                        time.sleep(1)
                        continue

                    # Determine fuzz job to be queued and then update fuzz_idx
                    # to point to the next job's parameters.
                    fuzzer_name = fuzz_names[fuzz_idx]
                    fuzz_section = 'fuzz.' + fuzzer_name
                    fuzz_idx = (fuzz_idx + 1) % len(self.fuzzers)

                    # Skip fuzz job if limit on parallel instances is reached.
                    instances = self.config.get(fuzz_section,
                                                'instances',
                                                fallback='inf')
                    instances = float(
                        instances) if instances == 'inf' else int(instances)
                    if instances <= sum(1 for job in running_jobs.values()
                                        if isinstance(job['job'], FuzzJob) and
                                        job['job'].fuzzer_name == fuzzer_name):
                        continue

                    # Before queueing a new fuzz job, check if we are working
                    # with the latest version of the SUT and queue an update if
                    # needed.
                    sut_name = self.config.get(fuzz_section, 'sut')
                    update_condition, update_condition_kwargs = config_get_callable(
                        self.config, 'sut.' + sut_name, 'update_condition')
                    if update_condition:
                        with update_condition:
                            if update_condition(**update_condition_kwargs):
                                self.add_update_job(sut_name)

                    self.add_fuzz_job(fuzzer_name)

                    # Poll newly added job(s). Looping ensures that jobs will
                    # eventually arrive.
                    # (Unfortunately, multiprocessing.Queue.empty() is unreliable.)
                    while not job_queue:
                        _poll_jobs()

                # Perform next job as soon as there is enough capacity for it.
                while True:
                    if not job_queue:
                        next_job = None
                        break
                    if load + job_queue[0].cost <= self.capacity:
                        next_job = job_queue.pop(0)
                        break
                    time.sleep(1)
                    _poll_jobs()
                    _update_load()
                if not next_job:
                    continue

                proc = Process(target=self._run_job, args=(next_job, ))
                running_jobs[next_job.id] = dict(job=next_job, proc=proc)
                self.listener.activate_job(ident=next_job.id)
                proc.start()

        except KeyboardInterrupt:
            pass
        except Exception as e:
            self.listener.warning(
                ident=None,
                msg='Exception in the main controller loop: {exception}\n{trace}'
                .format(exception=e, trace=traceback.format_exc()))
        finally:
            Controller.kill_process_tree(os.getpid(), kill_root=False)
            if os.path.exists(self.work_dir):
                shutil.rmtree(self.work_dir, ignore_errors=True)

    def _run_job(self, job):
        try:
            for issue in job.run():
                # Automatic reduction and/or validation if the job found something new
                if not self.add_reduce_job(issue=issue):
                    self.add_validate_job(issue=issue)
        except Exception as e:
            self.listener.warning(
                ident=job.id,
                msg='Exception in {job}: {exception}\n{trace}'.format(
                    job=repr(job), exception=e, trace=traceback.format_exc()))

    def add_fuzz_job(self, fuzzer_name, priority=False):
        # Added for the sake of completeness and consistency.
        # Should not be used by UI to add fuzz jobs.
        with self._shared_lock:
            self._shared_queue.put(
                (FuzzJob,
                 dict(fuzzer_name=fuzzer_name,
                      subconfig_id=self.fuzzers[fuzzer_name]['subconfig']),
                 priority))
        return True

    def add_validate_job(self, issue, priority=False):
        if not self.config.has_section('sut.' + issue['sut']):
            return False

        with self._shared_lock:
            self._shared_queue.put((ValidateJob, dict(issue=issue), priority))
        return True

    def add_reduce_job(self, issue, priority=False):
        if not self.config.has_option('sut.' + issue['sut'], 'reduce'):
            return False

        with self._shared_lock:
            self._shared_queue.put((ReduceJob, dict(issue=issue), priority))
        return True

    def add_update_job(self, sut_name, priority=False):
        if not self.config.has_option('sut.' + sut_name, 'update'):
            return False

        with self._shared_lock:
            self._shared_queue.put(
                (UpdateJob, dict(sut_name=sut_name), priority))

        if self.config.get('sut.' + sut_name,
                           'validate_after_update',
                           fallback=self.validate_after_update) in [
                               1, '1', True, 'True', 'true'
                           ]:
            self.validate_all(sut_name)

        return True

    def validate_all(self, sut_name=None):
        sut_name = [sut_name] if sut_name else [
            section for section in self.config.sections()
            if section.startswith('sut.') and section.count('.') == 1
        ]
        for issue in self.db.find_issues_by_suts(sut_name):
            if not issue.get('invalid'):
                self.add_validate_job(issue)

    def reduce_all(self, sut_name=None):
        sut_name = [sut_name] if sut_name else [
            section for section in self.config.sections()
            if section.startswith('sut.') and section.count('.') == 1
        ]
        for issue in self.db.find_issues_by_suts(sut_name):
            if not issue.get('reported') and not issue.get('reduced'):
                self.add_reduce_job(issue)

    def cancel_job(self, ident):
        with self._shared_lock:
            self._shared_queue.put((None, dict(ident=ident), None))
        return True

    @staticmethod
    def kill_process_tree(pid, kill_root=True, sig=signal.SIGTERM):
        try:
            root_proc = psutil.Process(pid)
            children = root_proc.children(recursive=True)
            if kill_root:
                children.append(root_proc)
            for proc in children:
                # Would be easier to use proc.terminate() here but psutils
                # (up to version 5.4.0) on Windows terminates processes with
                # the 0 signal/code, making the outcome of the terminated
                # process indistinguishable from a successful execution.
                try:
                    os.kill(proc.pid, sig)
                except OSError:
                    pass
            psutil.wait_procs(children, timeout=1)
        except psutil.NoSuchProcess:
            pass
Example No. 58
0
class LogGenerator(Cmd):
    def __init__(self):
        super().__init__(use_ipython=False)
        self.debug = True
        self._file = "/tmp/access.log"
        self.prompt = "loggen ({}) > ".format(self._file)
        self._user_count = 10
        self._host_count = 5
        self._section_count = 5
        self._log_datetime_format = "%d/%b/%Y:%H:%M:%S %z"
        self._proc_pool = None
        self._proc_queue = Queue()
        self._running = False
        self._update_prompt()

    def do_file(self, file_path) -> None:
        if file_path and not os.path.isfile(file_path):
            LogGenerator.create_file(file_path)
        if not (os.path.isfile(file_path) and os.access(file_path, os.W_OK)):
            self.perror("Missing or inaccessible file: {}".format(file_path))
            return
        else:
            self._file = file_path
            self._update_prompt()

    def do_truncate(self, _) -> None:
        was_running = self._running
        self._stop_write()
        LogGenerator.truncate_file(self._file)
        if was_running:
            self._start_write()

    def do_rotate(self, _) -> None:
        was_running = self._running
        self._stop_write()
        rotated_file = self._file + "_next"
        if os.path.isfile(rotated_file):
            os.remove(rotated_file)
        os.rename(self._file, rotated_file)
        LogGenerator.create_file(self._file)
        if was_running:
            self._start_write()

    def do_start(self, _) -> None:
        self._start_write()

    def do_stop(self, _) -> None:
        self._stop_write()

    def do_exit(self, _) -> bool:
        return True

    def _start_write(self):
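        # Drain any stale stop signals from a previous run so the new writer
        # does not exit immediately.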
        while not self._proc_queue.empty():
            self._proc_queue.get_nowait()
        pool = Pool(1, logwriter.append_http_log, (
            self._proc_queue,
            self._file,
            self._log_datetime_format,
            self._host_count,
            self._user_count,
            self._section_count,
        ))
        self._running = True
        self._proc_pool = pool
        self._update_prompt()

    def _stop_write(self):
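        # An item on the shared queue signals the writer to stop; the pool is
        # then closed and joined.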
        self._proc_queue.put("")
        if self._proc_pool:
            self._proc_pool.close()
            self._proc_pool.join()
        self._running = False
        self._update_prompt()

    def _update_prompt(self):
        self.prompt = "{} | {} > ".format(
            'writing' if self._running else 'idle', self._file)

    @staticmethod
    def truncate_file(file_path) -> None:
        open(file_path, "w").close()

    @staticmethod
    def create_file(file_path) -> None:
        open(file_path, 'a').close()
Example No. 59
0
    def __init__(
        self,
        channel: int,
        in_q: multiprocessing.Queue,
        out_q: multiprocessing.Queue,
        server_state: StateManager,
    ):

        process_title = "Player: Channel " + str(channel)
        setproctitle.setproctitle(process_title)
        multiprocessing.current_process().name = process_title

        self.running = True
        self.out_q = out_q

        self.logger = LoggingManager(
            "Player" + str(channel), debug=package.BETA)

        self.api = MyRadioAPI(self.logger, server_state)

        self.state = StateManager(
            "Player" + str(channel),
            self.logger,
            self.__default_state,
            self.__rate_limited_params,
        )

        self.state.update("start_time", datetime.now().timestamp())

        self.state.add_callback(self._send_status)

        self.state.update("channel", channel)
        self.state.update("tracklist_mode", server_state.get()[
                          "tracklist_mode"])
        self.state.update(
            "live", True
        )  # Channel is live until controller says it isn't.

        # Just in case any weights are somehow messed up, let's fix them.
        plan_copy: List[PlanItem] = copy.copy(self.state.get()["show_plan"])
        self._fix_and_update_weights(plan_copy)

        loaded_state = copy.copy(self.state.state)

        if loaded_state["output"]:
            self.logger.log.info("Setting output to: " +
                                 str(loaded_state["output"]))
            self.output(loaded_state["output"])
        else:
            self.logger.log.info("Using default output device.")
            self.output()

        loaded_item = loaded_state["loaded_item"]
        if loaded_item:
            # No need to load on init, the output switch does this, as it would for regular output switching.
            # self.load(loaded_item.weight)

            # Load may jump to the cue point, as it would do on a regular load.
            # If we were at a different state before, we have to override it now.
            if loaded_state["pos_true"] != 0:
                self.logger.log.info(
                    "Seeking to pos_true: " + str(loaded_state["pos_true"])
                )
                try:
                    self.seek(loaded_state["pos_true"])
                except error:
                    self.logger.log.error("Failed to seek on player start. Continuing anyway.")

            if loaded_state["playing"] is True:
                self.logger.log.info("Resuming playback on init.")
                # Use un-pause as we don't want to jump to a new position.
                try:
                    self.unpause()
                except error:
                    self.logger.log.error("Failed to unpause on player start. Continuing anyway.")
        else:
            self.logger.log.info("No file was previously loaded to resume.")

        try:
            while self.running:
                self._updateState()
                self._ping_times()
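                # Messages arrive on in_q as "SOURCE:COMMAND[:ARGS]" strings;
                # the source is checked against VALID_MESSAGE_SOURCES and the
                # command is dispatched via the message_types table below.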
                try:
                    message = in_q.get_nowait()
                    source = message.split(":")[0]
                    if source not in VALID_MESSAGE_SOURCES:
                        self.last_msg_source = ""
                        self.last_msg = ""
                        self.logger.log.warn(
                            "Message from unknown sender source: {}".format(
                                source)
                        )
                        continue

                    self.last_msg_source = source
                    self.last_msg = message.split(":", 1)[1]

                    self.logger.log.debug(
                        "Recieved message from source {}: {}".format(
                            self.last_msg_source, self.last_msg
                        )
                    )
                except Empty:
                    # The incoming message queue was empty,
                    # skip message processing

                    # If we're getting no messages, sleep.
                    # But if we do have messages, once we're done with one, we'll check for the next one more quickly.
                    time.sleep(0.05)
                else:

                    # We got a message.

                    ## Check if we're successfully loaded
                    # This is here so that we can check often, but not every single loop
                    # Only when user gives input.
                    self._checkIsLoaded()

                    # Output re-inits the mixer, so we can do this any time.
                    if self.last_msg.startswith("OUTPUT"):
                        split = self.last_msg.split(":")
                        self._retMsg(self.output(split[1]))

                    elif self.isInit:
                        message_types: Dict[
                            str, Callable[..., Any]
                        ] = {  # TODO Check Types
                            "STATUS": lambda: self._retMsg(self.status, True),
                            # Audio Playout
                            # Unpause, so we don't jump to 0, we play from the current pos.
                            "PLAY": lambda: self._retMsg(self.unpause()),
                            "PAUSE": lambda: self._retMsg(self.pause()),
                            "PLAYPAUSE": lambda: self._retMsg(
                                self.unpause() if not self.isPlaying else self.pause()
                            ),  # For the hardware controller.
                            "UNPAUSE": lambda: self._retMsg(self.unpause()),
                            "STOP": lambda: self._retMsg(
                                self.stop(user_initiated=True)
                            ),
                            "SEEK": lambda: self._retMsg(
                                self.seek(float(self.last_msg.split(":")[1]))
                            ),
                            "AUTOADVANCE": lambda: self._retMsg(
                                self.set_auto_advance(
                                    (self.last_msg.split(":")[1] == "True")
                                )
                            ),
                            "REPEAT": lambda: self._retMsg(
                                self.set_repeat(self.last_msg.split(":")[1])
                            ),
                            "PLAYONLOAD": lambda: self._retMsg(
                                self.set_play_on_load(
                                    (self.last_msg.split(":")[1] == "True")
                                )
                            ),
                            # Show Plan Items
                            "GETPLAN": lambda: self._retMsg(
                                self.get_plan(int(self.last_msg.split(":")[1]))
                            ),
                            "LOAD": lambda: self._retMsg(
                                self.load(int(self.last_msg.split(":")[1]))
                            ),
                            "LOADED?": lambda: self._retMsg(self.isLoaded),
                            "UNLOAD": lambda: self._retMsg(self.unload()),
                            "ADD": lambda: self._retMsg(
                                self.add_to_plan(
                                    json.loads(
                                        ":".join(self.last_msg.split(":")[1:]))
                                )
                            ),
                            "REMOVE": lambda: self._retMsg(
                                self.remove_from_plan(
                                    int(self.last_msg.split(":")[1]))
                            ),
                            "CLEAR": lambda: self._retMsg(self.clear_channel_plan()),
                            "SETMARKER": lambda: self._retMsg(
                                self.set_marker(
                                    self.last_msg.split(":")[1],
                                    self.last_msg.split(":", 2)[2],
                                )
                            ),
                            "RESETPLAYED": lambda: self._retMsg(
                                self.set_played(
                                    weight=int(self.last_msg.split(":")[1]),
                                    played=False,
                                )
                            ),
                            "SETPLAYED": lambda: self._retMsg(
                                self.set_played(
                                    weight=int(self.last_msg.split(":")[1]), played=True
                                )
                            ),
                            "SETLIVE": lambda: self._retMsg(
                                self.set_live(
                                    self.last_msg.split(":")[1] == "True")
                            ),
                        }

                        message_type: str = self.last_msg.split(":")[0]

                        if message_type in message_types.keys():
                            message_types[message_type]()

                        elif self.last_msg == "QUIT":
                            self._retMsg(True)
                            self.running = False
                            continue

                        else:
                            self._retMsg("Unknown Command")
                    else:

                        if self.last_msg == "STATUS":
                            self._retMsg(self.status)
                        else:
                            self._retMsg(False)

        # Catch the player being killed externally.
        except KeyboardInterrupt:
            self.logger.log.info("Received KeyboardInterupt")
        except SystemExit:
            self.logger.log.info("Received SystemExit")
        except Exception as e:
            self.logger.log.exception(
                "Received unexpected Exception: {}".format(e))

        self.logger.log.info("Quiting player " + str(channel))
        self.quit()
        self._retAll("QUIT")
        del self.logger
        os._exit(0)

    def run_multiprocess_search(self, paramether_dictionary_list,
                                num_cases_max):

        # The following function runs the search in parallel. As different configurations might have significantly
        # divergent runtimes, processes must be joined starting from the first one to terminate, and the result
        # objects might be big, therefore a parallel pool is not suitable.

        num_cases_evaluated = 0
        num_cases_started = 0
        num_cases_active = 0
        termination_sent = False

        process_list = [None] * self.parallelPoolSize

        queue_job_todo = Queue()
        queue_job_done = Queue()

        get_memory_threshold_reached_partial = partial(
            get_memory_threshold_reached,
            max_ram_occupied_perc=self.max_ram_occupied_perc)

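        # Spawn a fixed pool of worker processes; each consumes jobs from
        # queue_job_todo and pushes evaluated results to queue_job_done.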
        for current_process_index in range(self.parallelPoolSize):
            newProcess = multiprocessing.Process(
                target=process_worker,
                args=(
                    queue_job_todo,
                    queue_job_done,
                    current_process_index,
                    get_memory_threshold_reached_partial,
                ))

            process_list[current_process_index] = newProcess

            newProcess.start()
            newProcess = None

            print("Started process: {}".format(current_process_index))

        memory_threshold_reached, memory_used_quota = get_memory_threshold_reached(
            self.max_ram_occupied_perc)

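        # Main scheduling loop: keep queue_job_todo filled while process and
        # memory budgets allow, then drain queue_job_done to count completed
        # cases. A None on the todo queue tells the workers to terminate.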
        while num_cases_evaluated < num_cases_max:

            # Create as many new jobs as needed
            # Stop:     if the max number of parallel processes is reached or the max RAM occupancy is reached
            #           if no other cases to explore
            # If no termination was sent and active == 0, start one, otherwise everything stalls
            # WARNING: apparently the function "queue_job_todo.empty()" is not reliable
            while (
                        (num_cases_active < self.parallelPoolSize and not memory_threshold_reached) or (
                                num_cases_active == 0)) \
                    and not termination_sent:

                memory_threshold_reached, memory_used_quota = get_memory_threshold_reached(
                    self.max_ram_occupied_perc)

                if memory_threshold_reached:
                    writeLog(
                        self.ALGORITHM_NAME +
                        ": Memory threshold reached, occupied {:.4f} %\n".
                        format(memory_used_quota), self.logFile)

                if num_cases_started < num_cases_max and not memory_threshold_reached:
                    process_object = Process_object_data_and_evaluation(
                        self.recommender_class, self.dictionary_input,
                        paramether_dictionary_list[num_cases_started],
                        self.ALGORITHM_NAME, self.URM_validation,
                        self.evaluation_function)

                    queue_job_todo.put(process_object)
                    num_cases_started += 1
                    num_cases_active += 1
                    process_object = None
                    gc.collect()

                if num_cases_started >= num_cases_max and not termination_sent:
                    print("Termination sent")
                    queue_job_todo.put(None)
                    termination_sent = True
                gc.collect()

            # Read all completed jobs. WARNING: apparently the function "empty" is not reliable
            queue_job_done_is_empty = False

            while not queue_job_done_is_empty:

                try:
                    process_object = queue_job_done.get_nowait()

                    # self.update_on_new_result(process_object, num_cases_evaluated)
                    num_cases_evaluated += 1
                    num_cases_active -= 1
                    process_object = None

                except Empty:
                    queue_job_done_is_empty = True

            time.sleep(1)
            gc.collect()
            # print("num_cases_evaluated {}".format(num_cases_evaluated))

            # print("Evaluated {}, started {}, active {}".format(num_cases_evaluated, num_cases_started, num_cases_active))

        queue_job_todo.get()

        for current_process in process_list:
            # print("Waiting to Join {}".format(current_process))
            current_process.join()
            print("Joined {}".format(current_process))