Example No. 1
class Transcoder:
    def __init__(self):
        self.stopping = Event()

    def stop(self):
        logger.debug("Preventing new transcoding processes.")
        self.stopping.set()

    def transcode(self, path, format='mp3', bitrate=False):
        if self.stopping.is_set():
            return
        try:
            stop = Event()
            start_time = time.time()
            parent_conn, child_conn = Pipe()
            process = Process(target=transcode_process,
                    args=(child_conn, path, stop, format, bitrate))
            process.start()
            while not (self.stopping.is_set() or stop.is_set()):
                data = parent_conn.recv()
                if not data:
                    break
                yield data
            logger.debug("Transcoded %s in %0.2f seconds." % (path.encode(cfg['ENCODING']), time.time() - start_time))
        except GeneratorExit:
            stop.set()
            logger.debug("User canceled the request during transcoding.")
        except:
            stop.set()
            logger.warning("Some type of error occurred during transcoding.")
        finally:
            parent_conn.close()
            process.join()
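
A minimal usage sketch (not part of the original snippet): consuming the transcode() generator and streaming chunks to a file. It assumes the module-level names the class already relies on (transcode_process, cfg, logger) are importable; the paths and bitrate are purely illustrative.

# Hypothetical consumer of the streaming generator above.
transcoder = Transcoder()
with open("/tmp/output.mp3", "wb") as out:
    for chunk in transcoder.transcode("/music/song.flac", format="mp3", bitrate=192):
        out.write(chunk)
transcoder.stop()  # block any further transcoding processes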
Example No. 2
    def runscripts(self,team):
        ip = team['ip']
        if team['team_id'] not in self.run_list:
            self.log.info('No script to run for %s.'%(str(team)))
            return

        self.status['script_tot']=self.status['script_tot']+ len(self.run_list[team['team_id']])

        #sort by service
        ss = {}
        for sid in self.run_list[team['team_id']]:
            sid = int(sid)
            s = self.scripts[sid]
            srvid = s['service_id']
            if srvid in ss:
                ss[srvid].append(sid)
            else:
                ss[srvid] = [sid]
        
        #randomize delay
        for srvid,slist in ss.iteritems():
            #per service
            slock = Event()
            slock.set()
            self.locks.append(slock)
            rlist = self.get_rand_delay(slist)
            for sid,delay in rlist:
                p = self.update_script(team['team_id'],sid,self.scripts[sid]['is_bundle'])
                if p is None:
                    continue
                s = self.scripts[sid]
                self.runscript(slock,team['team_id'],sid,s['service_id'],SCRIPT_TIMEOUT,s['type'],p,ip,self.services[s['service_id']]['port'],delay)
Example No. 3
class CaptureProcess(Process):
    """A process that fills a queue with images as captured from 
    a camera feed"""
    def __init__(self, capture, imageQueue):
        Process.__init__(self, name="Capture")
        self.imageQueue = imageQueue
        self.capture = capture
        self.keepGoing = Event()
        self.keepGoing.set()
        self.daemon = True

    def run(self):
        print "CaptureProcess pid: %s" % (self.pid,)
        while self.keepGoing.is_set():
            image = captureImage(self.capture)
#            sys.stdout.write(".")
            try:
                self.imageQueue.put(serializeImage(image), block=True, timeout=0.25)
            except FullException:
                try:
                    _ = self.imageQueue.get_nowait()
                except:
                    pass  # Try to clear the queue, but don't worry if someone snatches it first
    def stop(self):
        self.keepGoing.clear()
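
A consumer-side sketch (not from the original project), assuming the usual multiprocessing Queue plus the module's own captureImage/serializeImage helpers; open_camera and handle_frame are placeholders.

# Hypothetical usage: the child fills the queue, the parent drains it.
imageQueue = Queue(maxsize=10)
capture = open_camera(0)                  # placeholder for the camera handle
proc = CaptureProcess(capture, imageQueue)
proc.start()
try:
    for _ in range(100):
        frame = imageQueue.get(timeout=1.0)   # serialized image bytes
        handle_frame(frame)                   # placeholder consumer
finally:
    proc.stop()
    proc.join()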
Example No. 4
class DataProcess(Process):
    def __init__(self, data_pipeline, **get_batch_kwargs):
        super(DataProcess, self).__init__(name='neuralnilm-data-process')
        self._stop = Event()
        self._queue = Queue(maxsize=3)
        self.data_pipeline = data_pipeline
        self._get_batch_kwargs = get_batch_kwargs

    def run(self):
        batch = self.data_pipeline.get_batch(**self._get_batch_kwargs)
        while not self._stop.is_set():
            try:
                self._queue.put(batch)
            except AssertionError:
                # queue is closed
                break
            batch = self.data_pipeline.get_batch(**self._get_batch_kwargs)

    def get_batch(self, timeout=30):
        if self.is_alive():
            return self._queue.get(timeout=timeout)
        else:
            raise RuntimeError("Process is not running!")

    def stop(self):
        self._stop.set()
        self._queue.close()
        self.terminate()
        self.join()
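
A usage sketch, assuming a data_pipeline object that exposes get_batch(**kwargs) as the class expects; the keyword argument and the train_on callback are illustrative.

# Hypothetical usage: run the producer process, pull a few batches, shut down.
proc = DataProcess(data_pipeline, num_seq_per_batch=64)
proc.start()
for _ in range(10):
    batch = proc.get_batch(timeout=30)
    train_on(batch)                  # placeholder training step
proc.stop()                          # sets the event, closes the queue, terminates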
Example No. 5
class BaseWorker(Process):
    def __init__(self, *args, **kwargs):
        super(BaseWorker, self).__init__(*args, **kwargs)
        self.should_exit = Event()

    def shutdown(self):
        self.should_exit.set()
Example No. 6
class ServerProc(object):
    def __init__(self):
        self.proc = None
        self.daemon = None
        self.stop = Event()

    def start(self, init_func, config, paths, port):
        self.proc = Process(target=self.create_daemon, args=(init_func, config, paths, port))
        self.proc.daemon = True
        self.proc.start()

    def create_daemon(self, init_func, config, paths, port):
        try:
            self.daemon = init_func(config, paths, port)
        except socket.error:
            logger.error("Socket error on port %s" % port)
            raise

        if self.daemon:
            self.daemon.start(block=False)
            try:
                self.stop.wait()
            except KeyboardInterrupt:
                pass

    def wait(self):
        self.stop.set()
        self.proc.join()

    def kill(self):
        self.stop.set()
        self.proc.terminate()
        self.proc.join()
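
A lifecycle sketch, assuming init_func builds a daemon object exposing start(block=False), which is what create_daemon expects; config, paths and the port are placeholders.

# Hypothetical usage: bring the server up in a child process, then tear it down.
server = ServerProc()
server.start(init_func, config, paths, port=8000)
try:
    run_tests()          # placeholder for work done while the server is up
finally:
    server.kill()        # set the stop event and terminate the child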
Example No. 7
class World (Process):
    """ A group of particles that can interact with each other """

    def __init__(self, plane, send, update_rate=60):
        Process.__init__(self)
        self.plane = plane
        self.send = send
        self.update_interval = 1/update_rate
        self.exit = Event()
        logger.info('Initialised')

    def run(self):
        logger.info('Running')
        previous_update = current_time()
        while not self.exit.is_set():
            # Update each particle
            for particle in self.plane:
                particle.update()

            # stuff and things
            self.send.send(self.plane)

            # Sleep to maintain update rate
            update_delay = self.update_interval - (current_time() - previous_update)
            previous_update = current_time()
            sleep(max(0, update_delay))

    def terminate(self):
        logger.info('Exiting')
        self.exit.set()
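
A sketch pairing the class with a Pipe, assuming plane is an iterable of particle objects that implement update(); render is a placeholder. Note that terminate() here only sets the exit event, so join() is still needed.

# Hypothetical usage: consume plane snapshots from the simulation process.
parent_conn, child_conn = Pipe()
world = World(plane, send=child_conn, update_rate=60)
world.start()
for _ in range(600):                     # roughly ten seconds of updates
    snapshot = parent_conn.recv()
    render(snapshot)                     # placeholder renderer
world.terminate()                        # sets the exit event
world.join()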
Example No. 8
class Worker(Process):
    def __init__(self, buffer, reorder_buffer, job):
        Process.__init__(self)
        self.buffer = buffer
        self.reorder_buffer = reorder_buffer
        self.job = job
        self.event = Event()

    def run(self):
        self.event.set()
        while self.event.is_set():
            try:
                block_number, data = self.buffer.get()
            except IOError, e:
                if e.errno == errno.EINTR:
                    data = EOF

            if data == EOF:
                self.stop()
                break
            worked_data = self.job(data)

            while self.event.is_set():
                try:
                    self.reorder_buffer.put(block_number, worked_data)
                    break
                except IndexError:
                    # Block num bigger than expected,
                    # wait until ReorderBuffer starts
                    # processing blocks in this range
                    time.sleep(0.1)
                except IOError, e:
                    if e.errno == errno.EINTR:
                        self.stop()
Example No. 9
class New_Process_Actor(Actor):
    '''Create an Actor in a new process. Connected as usual with scipysim 
    channels. When this Actor is started, it launches a new process, creates
    an instance of the Actor class passed to it in a second thread, and starts
    that actor.
    '''
    def __init__(self, cls, *args, **kwargs):
        super(New_Process_Actor, self).__init__()
        self.cls = cls
        self.args = list(args)
        self.kwargs = kwargs
        self.mqueue = MQueue()
        self.mevent = MEvent()
        
        if 'input_channel' not in kwargs:
            kwargs['input_channel'] = self.args[0]
        
        chan = kwargs['input_channel']
        kwargs['input_channel'] = self.mqueue
        
        
        print 'chan: ', chan
        self.c2p = Channel2Process(chan, self.mevent, self.mqueue)
        
        self.c2p.start()


    def run(self):
        self.t = Process(target=target, args=(self.cls, self.args, self.kwargs))
        self.t.start()
        self.mevent.set() # signal that process is ready to receive
        self.c2p.join()
        self.t.join()
Example No. 10
class MistProcess(Process):

    def __init__(self, gpio, sleep=1, name='MistProcess'):
        Process.__init__(self, name=name)
        self.logger = multiprocessing.get_logger()
        self.event = Event()
        self.name = name
        self.gpio = gpio
        self.sleep = sleep
        self.mist = mraa.Gpio(self.gpio)
        self.mist.dir(mraa.DIR_OUT)

    def _mist_on(self):
        self.logger.debug('Mist on')
        self.mist.write(1)

    def _mist_off(self):
        self.logger.debug('Mist off')
        if self.mist:
            self.mist.write(0)

    def run(self):
        self.event.set()
        self.logger.debug('PID: %d' % multiprocessing.current_process().pid)

        while self.event.is_set():
            self._mist_on()
            time.sleep(self.sleep)

    def stop(self):
        self.logger.debug('Process {} will halt.'.format(self.name))
        self.event.clear()
        self._mist_off()
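
A usage sketch, assuming an mraa-capable board; the GPIO pin and timings are illustrative.

# Hypothetical usage: run the mister for a minute, then stop it cleanly.
mist = MistProcess(gpio=5, sleep=2)
mist.start()
time.sleep(60)
mist.stop()       # clears the event and switches the pin off
mist.join()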
Example No. 11
def test_sentinel():
    start_event = Event()
    stop_event = Event()
    stop_event.set()
    s = Sentinel(stop_event, start_event, list_key='sentinel_test:q')
    assert start_event.is_set()
    assert s.status() == Conf.STOPPED
Example No. 12
def test_cluster(r):
    list_key = 'cluster_test:q'
    r.delete(list_key)
    task = async('django_q.tests.tasks.count_letters', DEFAULT_WORDLIST, list_key=list_key)
    assert queue_size(list_key=list_key, r=r) == 1
    task_queue = Queue()
    assert task_queue.qsize() == 0
    result_queue = Queue()
    assert result_queue.qsize() == 0
    event = Event()
    event.set()
    # Test push
    pusher(task_queue, event, list_key=list_key, r=r)
    assert task_queue.qsize() == 1
    assert queue_size(list_key=list_key, r=r) == 0
    # Test work
    task_queue.put('STOP')
    worker(task_queue, result_queue, Value('f', -1))
    assert task_queue.qsize() == 0
    assert result_queue.qsize() == 1
    # Test monitor
    result_queue.put('STOP')
    monitor(result_queue)
    assert result_queue.qsize() == 0
    # check result
    assert result(task) == 1506
    r.delete(list_key)
Example No. 13
class DataLoaderOnTheGround():
    def __init__(self, config):
        default_config = Config(proc_count = 4)
        self.config = default_config(**config)
        self.exit = Event()
        self.task_list = config.task_list
        self.task_queue = Queue(maxsize = 10)
        self.batch_queue = Queue(maxsize =  10)
        self.workers = []
        self.distributor = Process(target = task_distributor, args = (self,))
        for _ in range(self.config.proc_count):
            self.workers.append(Process(target = config.worker, args = (self,)))

        self.distributor.daemon = True
        self.distributor.start()
        for w in self.workers:
            w.daemon = True
            w.start()
    def next_batch(self):
        return Config(self.batch_queue.get())
    def __del__(self):
        self.exit.set()
        self.distributor.join()
        for w in self.workers:
            w.join()
Example No. 14
def test_sentinel():
    start_event = Event()
    stop_event = Event()
    stop_event.set()
    s = Sentinel(stop_event, start_event, broker=get_broker('sentinel_test:q'))
    assert start_event.is_set()
    assert s.status() == Conf.STOPPED
Example No. 15
    def _process(self, worker_count, hard, permissions_only=False):
        idxs = ["allowedRolesAndUsers"] if permissions_only else []
        terminator = Event()
        toggle_debug = Event()

        def handle_SIGUSR1(signum, frame):
            if toggle_debug.is_set():
                toggle_debug.clear()
            else:
                toggle_debug.set()

        signal.signal(signal.SIGUSR1, handle_SIGUSR1)

        p = Process(
            target=_run_model_catalog_init,
            args=(worker_count, hard, idxs, terminator, toggle_debug)
        )
        try:
            p.start()
            p.join()
        except KeyboardInterrupt:
            log.info("Received signal to terminate, stopping subprocess")
            terminator.set()
            p.join(90)
            if p.is_alive():
                log.info("Timeout waiting for subprocess to exit gracefully")
                p.terminate()
Example No. 16
class DataLoaderOnTheFly():
    def __init__(self, config):
        default_config = Config(proc_count = 4, limit_batch_count = None)
        self.config = default_config(**config)
        self.exit = Event()
        self.batch_queue = Queue(maxsize = 10)
        if self.config.limit_batch_count is None:
            self.limited = False
        else:
            self.limited = True
            self.batch_list = []
            self.index = -1
        self.workers = []
        for _ in range(self.config.proc_count):
            self.workers.append(Process(target = config.worker, args = (self,)))
        for w in self.workers:
            w.daemon = True
            w.start()
    def next_batch(self):
        if self.limited:
            if len(self.batch_list) < self.config.limit_batch_count:
                self.batch_list.append(Config(self.batch_queue.get()))
            self.index = (self.index + 1) % self.config.limit_batch_count
            return Config(self.batch_list[self.index])
        else:
            return Config(self.batch_queue.get())
    def __del__(self):
        self.exit.set()
        for w in self.workers:
            w.join()
Example No. 17
    def testThreadChecker(self):
        stop_event = Event()
        link = "munichre.com"
        checker = SiteThreadChecker(full_link=link, thread_pool_size=3, max_page=3000, max_level=10)

        def crawl():
            checker.crawling()

        queue_server_t = Process(target=run_queue_server)
        queue_server_t.start()
        output_t = Process(target=single_output, args=(stop_event,))
        output_t.start()
        # link = "http://sweetlifebake.com/#axzz3t4Nx7b7N"
        crawl_t = Thread(target=crawl)
        crawl_t.start()
        timeout = 1000
        counter = 0
        while counter < timeout:
            time.sleep(1)
            counter += 1
        print("is going to sudden death.")
        stop_event.set()
        checker.sudden_death()
        if crawl_t.is_alive():
            crawl_t.join()
        output_t.terminate()
        queue_server_t.terminate()

        print("finished")
Example No. 18
    def recog_proc(self, child_recog: Pipe, e_recog: Event, yolo_type: str):
        """
        Parallel process for object recognition

        Arguments:
            child_recog {Pipe} -- pipe for communication with parent process,
                sends bbox yolo type of recognized object
            e_recog {Event} -- event for indicating complete recognize in frame
        """

        # initialize YOLO
        yolo = Yolo(yolo_type)
        e_recog.set()
        print("yolo defined")

        while True:
            frame = child_recog.recv()
            print("recog process frame received")
            if frame is None:
                print("FRAME NONE? R U SURE ABOUT THAT?!")
                return
            res = yolo.detect(frame, cvmat=True)
            print("recog send")
            e_recog.set()
            child_recog.send(res)
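
A parent-side sketch of the protocol described in the docstring, assuming the method lives on some detector object and that frames is an iterable of OpenCV images; the yolo_type string is illustrative.

# Hypothetical parent side: wait for YOLO to load, then stream frames.
parent_recog, child_recog = Pipe()
e_recog = Event()
p = Process(target=detector.recog_proc, args=(child_recog, e_recog, "yolov3-tiny"))
p.start()
e_recog.wait()                       # set once YOLO has finished loading
for frame in frames:
    parent_recog.send(frame)
    boxes = parent_recog.recv()      # detections for this frame
parent_recog.send(None)              # the child returns when it receives None
p.join()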
Example No. 19
class Logger(object):
    def __init__(self, filename):
        self.qtag = Queue()
        self.done = Event()
        self.tag = None
        self.filename = filename
        self.file = None
    def start(self):
        self.file = open(self.filename, 'w')
        print 'Opened',self.filename,'for writing.'
    def set_tag(self, tag):
        self.qtag.put(tag)
    def set_done(self):
        self.done.set()
    def log(self, nodeid, msgid, data):
        if not self.qtag.empty():
            self.tag = self.qtag.get()
        if self.done.is_set():
            self.done.clear()
            return True
        L = ['%f'%time.time(), '%d'%nodeid, '%d'%msgid] + map(str,data)
        if self.tag:
            L.append(self.tag)
        print >>self.file, ','.join(L)
        self.file.flush()
    def close(self):
        if self.file:
            self.file.close()
            print 'File closed.'
Example No. 20
    def run(self):
        logger = self.ipc_logger()
        input_queue = Queue(20 * self.n_processes)
        done_event = Event()
        processes = [
            ProteinDigestingProcess(
                self.connection, self.hypothesis_id, input_queue,
                self.digestor, done_event=done_event,
                message_handler=logger.sender()) for i in range(
                self.n_processes)
        ]
        protein_ids = self.protein_ids
        i = 0
        n = len(protein_ids)
        chunk_size = 2
        interval = 30
        for process in processes:
            input_queue.put(protein_ids[i:(i + chunk_size)])
            i += chunk_size
            process.start()

        last = i
        while i < n:
            input_queue.put(protein_ids[i:(i + chunk_size)])
            i += chunk_size
            if i - last > interval:
                self.log("... Dealt Proteins %d-%d %0.2f%%" % (
                    i - chunk_size, min(i, n), (min(i, n) / float(n)) * 100))
                last = i

        done_event.set()
        for process in processes:
            process.join()
        logger.stop()
Example No. 21
def main():
    # Use alphazero self-play for data generation
    agents_meta = parse_schedule() 

    # worker variable of main process
    board = Board()
    sigexit = Event()
    sigexit.set()  # pre-set signal so main proc generator will iterate only once

    # subprocess data generator
    helper = DataHelper(data_files=[])
    helper.set_agents_meta(agents_meta=agents_meta)     
    generator = helper.generate_batch(TRAINING_CONFIG["batch_size"])

    # start generating
    with h5py.File(f"{DATA_CONFIG['data_path']}/latest.train.hdf5", 'a') as hf:    
        for state_batch, value_batch, probs_batch in generator:
            for batch_name in ("state_batch", "value_batch", "probs_batch"):
                if batch_name not in hf:
                    shape = locals()[batch_name].shape
                    hf.create_dataset(batch_name, (0, *shape), maxshape=(None, *shape))
                hf[batch_name].resize(hf[batch_name].shape[0] + 1, axis=0)
                hf[batch_name][-1] = locals()[batch_name]

            # prevent main proc from generating data too quick
            # since sigexit has been set, proc will iterate only once
            run_proc(helper.buffer, helper.buffer_size, helper.lock,
                     sigexit, agents_meta, board) 
            board.reset()
Example No. 22
class DeviceServer(ThreadedTCPServer, Process):
	
	#causes handle_request to return
	timeout = 1
	
	def __init__(self, mux, muxdevice, server_address, RequestHandlerClass):
		Process.__init__(self)
		ThreadedTCPServer.__init__(self, server_address, RequestHandlerClass)
		self.mux = mux
		self.muxdev = muxdevice
		self._stop = Event()

	def stop(self):
		self._stop.set()
		
	def stopped(self):
		return self._stop.is_set()

	def run(self):
		if self.stopped():
			_LOGGER.warning("Thread already stopped")
		
		while not self.stopped():
			self.handle_request()
		self.socket.close()
		_LOGGER.debug("%s will exit now" % (str(self)))
Example No. 23
class ClassifierWorkerPool(object):
    def __init__(self):
        self.queue = Queue(100)
        self.workers = []
        self.stop = Event()
        self.stop.clear()
        self.queue_feeder = QueueFeeder(self.queue, self.stop)

        row = TrainedClassifiers.objects(name=config.classifier).first()

        if not row:
            raise Exception("Classifier %s does not exists" % config.classifier)

        self.trained_classifier = row.get_classifier()

    def start(self):
        self.queue_feeder.start()

        for i in range(0, config.classifier_pool_size):
            worker = ClassifierWorker(self.trained_classifier, self.queue, self.stop)
            worker.start()
            self.workers.append(worker)

    def terminate(self):
        self.stop.set()
        self.queue_feeder.join()
        for w in self.workers:
            w.join()
Example No. 24
class QueueTask:
    def __init__(self):
        self.queue = JoinableQueue()
        self.event = Event()
        atexit.register( self.queue.join )

        process = Process(target=self.work)
        process.daemon = True
        process.start()


    def work(self):
        while True:
            func, args, wait_for = self.queue.get()

            for evt in wait_for: 
                evt.wait()
            func(*args)
            self.event.set()

            self.queue.task_done()


    def enqueue(self, func, args=[], wait_for=[]):
        self.event.clear()
        self.queue.put( (func, args, wait_for) )

        return self.event 
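
A sketch of chaining jobs through the returned events; download and process_download are placeholder module-level functions (they must be picklable). Note that a single Event instance is reused for every job, so wait_for only really expresses "some job finished since the last enqueue".

# Hypothetical usage: the second job waits for the first one's completion event.
tasks = QueueTask()
first_done = tasks.enqueue(download, args=["http://example.com/data"])
tasks.enqueue(process_download, args=["data"], wait_for=[first_done])
tasks.queue.join()    # atexit also joins the queue at interpreter exit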
Example No. 25
class AMQPInput(BaseInput):

	log = logging.getLogger("%s.AMQPInput" %(__name__))

	def __init__(self, aggr_config, namespace="local", config={}):
		super(AMQPInput, self).__init__(aggr_config, namespace, config)
		self.exit = Event()

	def run(self):
		self.log.info("Connecting to (%s): %s" %(self.outputType, self.outputUri))
		aggrConn = self.connectAggregator()

		rabbitmqConsumer = RabbitMQConsumer(**self.config)

		def callback(event):
			if not event.has_key('namespace'):
				event['namespace'] = self.namespace

			aggrConn.send(event)

			if self.exit.is_set():
				aggrConn.close()
				rabbitmqConsumer.stop()

		rabbitmqConsumer.userCallbacks = [ callback ]	
		rabbitmqConsumer.start()

	def shutdown(self):
		self.exit.set()
Example No. 26
class SharedFile(object):
    def __init__(self, filename):
        self.filename = filename
        self.fevent = Event()
        # self.state = Value('i', 0)
        self.fevent.set()

    def write(self, mode, data):
        # print("Write {}".format(inspect.stack()[1][3]))
        self.wait_freedom_and_lock()

        f = open(self.filename, mode)
        f.write(data)
        f.close()
        self.unlock()

    def read(self):
        # print("Read {}".format(inspect.stack()[1][3]))
        self.wait_freedom_and_lock()

        f = open(self.filename, 'r')
        data = f.read()
        f.close()
        self.unlock()
        return data

    def wait_freedom_and_lock(self):
        self.fevent.wait()
        self.fevent.clear()
        # return

    def unlock(self):
        self.fevent.set()
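
A usage sketch; the Event acts as a crude cross-process lock around the file. Because wait() followed by clear() is not atomic, a multiprocessing.Lock would be the more robust choice, but the sketch mirrors the class as written.

# Hypothetical usage: two processes appending to the same file.
shared = SharedFile("/tmp/shared.log")

def writer(tag):
    for i in range(5):
        shared.write('a', "%s %d\n" % (tag, i))

procs = [Process(target=writer, args=(t,)) for t in ("A", "B")]
for p in procs:
    p.start()
for p in procs:
    p.join()
print(shared.read())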
Example No. 27
class Updater(Process):

    def __init__(self, maxsize=15):
        Process.__init__(self)
        #self.queue = Queue(maxsize)
        self.queue = Queue()
        self.queue_lock = Lock()
        self._exit = Event()

    def run(self):
        while not self._exit.is_set():
            #with self.queue_lock:
            self.queue.put(self.receive())
            #self.queue.put_nowait(self.receive())
            #if self.queue.full():
            #    try:
            #        self.queue.get_nowait()
            #    except:
            #        pass

    def stop(self):
        self._exit.set()
        # This leaves the process hanging on Windows
        #self.join(STOP_TIMEOUT)
        if self.is_alive():
            #TODO make a nicer warning
            print 'Terminating updater:', self
            self.terminate()

    def receive(self):
        raise NotImplementedError
Example No. 28
class StoppableProcess(Process):
    exit = None
    sleep = None

    def __init__(self, sleep=1, *args, **kwargs):
        self.exit = Event()
        self.sleep = sleep
        super(StoppableProcess, self).__init__(*args, **kwargs)

    def _setup(self):
        pass

    def _teardown(self):
        pass

    def _ping(self):
        raise NotImplementedError

    def _should_exit(self):
        return self.exit.wait(0)

    def run(self):
        self._setup()
        while True:
            if self._ping() or self.exit.wait(self.sleep * 1.0):
                self._teardown()
                return

    def stop(self):
        self.exit.set()
        self.join(self.sleep)
        if self.is_alive():
            self.terminate()
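
A minimal subclass sketch: _ping() is called once per sleep interval, and returning a truthy value ends the loop, as does calling stop() from the parent; the work done per tick is a placeholder.

# Hypothetical subclass: do one unit of work per interval until stopped.
class HeartbeatProcess(StoppableProcess):
    def _setup(self):
        self.count = 0

    def _ping(self):
        self.count += 1
        return self.count >= 10      # returning True ends run()

hb = HeartbeatProcess(sleep=1)
hb.start()
time.sleep(5)
hb.stop()                            # sets the exit event, joins, terminates if needed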
Example No. 29
class fmanager:
    def __init__(self,data,fn):
        self.sf = Event()
        self.sf.clear()

        self.nproc=cpu_count()
        self.pipes = [Pipe() for i in xrange(self.nproc)]
        self.e = [evaluator(self.pipes[i][1],self.sf,data,fn) for i in xrange(self.nproc)]
        null = [i.start() for i in self.e]
        return

    def __del__(self):
        self.sf.set()
        null = [i.join() for i in self.e]
        null = [i.terminate() for i in self.e]

        return

    def eval(self,x):
        nd = len(x)
        for i in xrange(nd):
            self.pipes[i % self.nproc][0].send([i, x[i]])
        solns = []
        while len(solns) < nd:
            for i in xrange(self.nproc):
                if self.pipes[i][0].poll(0.005):
                    solns.append(self.pipes[i][0].recv())
        solns.sort(key=lambda i: i[0])
        return [i[1] for i in solns]
Example No. 30
class StoppableProcess(Process):
    """ Base class for Processes which require the ability
    to be stopped by a process-safe method call
    """

    def __init__(self):
        self._should_stop = Event()
        self._should_stop.clear()
        super(StoppableProcess, self).__init__()

    def join(self, timeout=0):
        """ Joins the current process and forces it to stop after
        the timeout if necessary

        :param timeout: Timeout duration in seconds
        """
        self._should_stop.wait(timeout)
        if not self.should_stop():
            self.stop()
        super(StoppableProcess, self).join(0)

    def stop(self):
        self._should_stop.set()

    def should_stop(self):
        return self._should_stop.is_set()

    def __repr__(self):
        return "<%s(should_stop=%s)>" % (
            self.__class__.__name__, self.should_stop())
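
A subclass sketch for this variant: the base class only provides the flag, so run() is expected to poll should_stop(); do_tick is a placeholder.

# Hypothetical subclass: loop until stop() is called from the parent.
class TickerProcess(StoppableProcess):
    def run(self):
        while not self.should_stop():
            do_tick()                # placeholder unit of work
            time.sleep(0.1)

ticker = TickerProcess()
ticker.start()
time.sleep(2)
ticker.stop()
ticker.join(timeout=1)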
Example No. 31
def main():
    q = Queue()
    # The main process gets a simple configuration which prints to the console.
    config_initial = {
        'version': 1,
        'formatters': {
            'detailed': {
                'class':'logging.Formatter',
                'format': '%(asctime)s %(name)-15s %(levelname)-8s %(processName)-10s %(message)s'
            }
        },
        'handlers':{
            'console': {
                'class': 'logging.StreamHandler',
                'level': 'INFO',
            },
        },
        'root': {
            'level': 'DEBUG',
            'handlers': ['console']
        }
    }
    # The worker process configuration is just a QueueHandler attached to the
    # root logger, which allows all messages to be sent to the queue.
    # We disable existing loggers to disable the "setup" logger used in the
    # parent process. This is needed on POSIX because the logger will
    # be there in the child following a fork().

    config_worker = {
        'version': 1,
        'disable_existing_loggers': True,
        'handlers': {
            'queue': {
                'class': 'logging.handlers.QueueHandler',
                'queue': q,
            },
        },
        'root': {
            'level': 'DEBUG',
            'handlers': ['queue']
        },
    }
    # The listener process configuration shows that the full flexibility of
    # logging configuration is available to dispatch events to handlers however
    # you want.
    # We disable existing loggers to disable the "setup" logger used in the
    # parent process. This is needed on POSIX because the logger will
    # be there in the child following a fork().
    config_listener = {
        'version': 1,
        'formatters': {
            'detailed': {
                'class': 'logging.Formatter',
                'format': '%(asctime)s %(name)-15s %(levelname)-8s %(processName)-10s %(message)s'
            },
            'simple': {
                'class': 'logging.Formatter',
                'format': '%(name)-15s %(levelname)-8s %(processName)-10s %(message)s'
            },
        },
        'handlers': {
            'console': {
                'class': 'logging.StreamHandler',
                'level': 'INFO',
                'formatter': 'simple',
            },
            'file': {
                'class': 'logging.FileHandler',
                'filename': './log/mplog.log',
                'mode': 'w',
                'formatter': 'detailed',
            },
            'foofile': {
                'class': 'logging.FileHandler',
                'filename': './log/mplog-foo.log',
                'mode': 'w',
                'formatter': 'detailed',
            },
            'errors': {
                'class': 'logging.FileHandler',
                'filename': './log/mplog-errors.log',
                'mode': 'w',
                'level': 'ERROR',
                'formatter': 'detailed',
            },
        },
        'loggers': {
            'foo': {
                'handlers': ['foofile']
            },
            'root': {
                'level': 'DEBUG',
                'handlers': ['console', 'file', 'errors']
            }
        }
    }

    logging.config.dictConfig(config_initial)
    logger = logging.getLogger('setup')
    logger.info('About to create workers')
    workers = []
    for i in range(5):
        wp = Process(target=worker_process, name='worker: {}'.format(i + 1),
                     args=(config_worker,) )
        workers.append(wp)
        wp.start()
        logger.info('started worker: {}'.format(wp.name))
    logger.info('About to create listener ...')
    stop_event = Event()
    lp = Process(target=listener_process, name='listener',
                 args=(q, stop_event, config_listener))
    lp.start()
    logger.info('Started listener')
    for wp in workers:
        wp.join()
    # Workers all done, listening can now stop.
    # Logging in the parent still works normally.
    logger.info('Telling listener to stop ...')
    stop_event.set()
    lp.join()
    logger.info('All done')
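
worker_process and listener_process are referenced above but not shown. A plausible sketch of both, following the standard-library logging-cookbook pattern, might look like the following (this is an assumption, not the original code); it relies on logging, logging.config, multiprocessing and the queue module being imported.

def worker_process(config):
    # Configure the QueueHandler-only config and emit a few records.
    logging.config.dictConfig(config)
    logger = logging.getLogger('foo')
    for i in range(10):
        logger.info('message %d from %s', i,
                    multiprocessing.current_process().name)

def listener_process(q, stop_event, config):
    # Dispatch queued records using the full handler configuration.
    logging.config.dictConfig(config)
    while not (stop_event.is_set() and q.empty()):
        try:
            record = q.get(timeout=0.5)
            logging.getLogger(record.name).handle(record)
        except queue.Empty:
            continue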
Example No. 32
to_process = itertools.islice(runtime_files, args.skip, stop_index)

log("Setting up workers.")
# Set up multiprocessing result list and queue.
manager = Manager()

# This list contains analysis results as
# (filename, category, meta, analytics) quadruples.
res_list = manager.list()

# Holds results transiently before flushing to res_list
res_queue = SimpleQueue()

# Start the periodic flush process, only run while run_signal is set.
run_signal = Event()
run_signal.set()
flush_proc = Process(target=flush_queue,
                     args=(FLUSH_PERIOD, run_signal, res_queue, res_list))
flush_proc.start()

workers = []
avail_jobs = list(range(args.jobs))
contract_iter = enumerate(to_process)
contracts_exhausted = False

log("Analysing...\n")
try:
    while not contracts_exhausted:

        # If there are both workers and contracts available, use the former to work on the latter.
        while not contracts_exhausted and len(avail_jobs) > 0:
Example No. 33
File: word.py Project: t0z/tuby
        if reset:
            reset = False
            started_on = time()
        if time() - timeout > started_on:
            return True
        return False

    return wakeup


"""Main
"""
pool = Pool()
queue = Queue()
alive = Event()
alive.set()
count = 0
done = 0
process = []
started_on = time()
stat_files = Value('i', 0)
"""Listing file to read"""
for dirin in TUBY.stdin:
    TUBY.log(u'+ input directory: %s', dirin.strip())
    p = Process(target=fill, args=(dirin.strip(), ))
    p.daemon = False
    p.start()
    process.append(p)
    fill(dirin)

sleep(0.1)
Example No. 34
class GPUMonitor(Process):
    def __init__(self, logger=None):
        self.logger = logger or ScreenLogger()

        # Prepare signal
        self.stop_signal = Event()
        self.run_signal = Event()
        self.set_signal = Event()

        # Stores list of available GPUs
        self._free_GPUs = Queue()

        super(GPUMonitor, self).__init__(target=self._monitor)
        self.start()

    def stop(self):
        self.stop_signal.set()

    def _monitor(self):
        while not self.stop_signal.is_set():
            if self.run_signal.is_set():
                # Note: Queue.empty() only reports whether the queue is empty;
                # it does not drain it
                self._free_GPUs.empty()

                # Get free GPUs
                free = get_free_gpus()

                # Add number of elements that will be put in the queue as first
                # element to be read from main process
                self._free_GPUs.put(len(free))

                # Add available GPUs to queue
                for g in free:
                    self._free_GPUs.put(g)

                # Set the signal that main process can start reading queue
                self.set_signal.set()

                # Stop run signal for this process
                self.run_signal.clear()
            else:
                time.sleep(0.5)
                self.set_signal.clear()

    @property
    def free_GPUs(self):
        self.run_signal.set()
        while not self.set_signal.is_set():
            time.sleep(0.2)

        free = []
        for i in range(self._free_GPUs.get()):
            free.append(self._free_GPUs.get())
        return free

    def get_free_GPUs(self, N):
        return _get_free_gpu(self.free_GPUs, N)

    def await_and_set_free_GPU(self, N=0, sleep_seconds=60, stop_after=False):
        cuda_visible_dev = ""
        if N != 0:
            self.logger("Waiting for free GPU.")
            found_gpu = False
            while not found_gpu:
                cuda_visible_dev = self.get_free_GPUs(N=N)
                if cuda_visible_dev:
                    self.logger("Found free GPU: %s" % cuda_visible_dev)
                    found_gpu = True
                else:
                    self.logger("No available GPUs... Sleeping %i seconds." %
                                sleep_seconds)
                    time.sleep(sleep_seconds)
        else:
            self.logger("Using CPU based computations only!")
        self.set_GPUs = cuda_visible_dev
        if stop_after:
            self.stop()

    @property
    def num_currently_visible(self):
        return len(self.set_GPUs.strip().split(","))

    @property
    def set_GPUs(self):
        try:
            return os.environ["CUDA_VISIBLE_DEVICES"]
        except KeyError:
            return ""

    @set_GPUs.setter
    def set_GPUs(self, GPUs):
        set_gpu(GPUs)

    def set_and_stop(self, GPUs):
        self.set_GPUs = GPUs
        self.stop()
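
A usage sketch; get_free_gpus and set_gpu are helpers the class already depends on, and the monitor starts itself in __init__.

# Hypothetical usage: wait for one free GPU, export it, then stop the monitor.
monitor = GPUMonitor()
monitor.await_and_set_free_GPU(N=1, sleep_seconds=30, stop_after=True)
print("CUDA_VISIBLE_DEVICES =", monitor.set_GPUs)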
Example No. 35
def main():
    args = parse_args()
    timeout = args.timeout

    ## start crawler
    # crawler_noticer = Event()
    # crawler_noticer.clear()
    # result_noticer = Event()
    # result_noticer.clear()
    # qreader, qwriter = Pipe()
    # stdreader, stdwriter = Pipe()
    # crawler = multiprocessing.Process(
    #     target=crawler_daemon,
    #     args=(crawler_noticer, qreader, result_noticer, stdwriter)
    # )
    # crawler.daemon = True
    # crawler.start()

    adb_bin = get_adb_tool()
    if use_monitor:
        os.system("{0} connect 127.0.0.1:62001".format(adb_bin))

    check_screenshot(filename="screenshot.png", directory=data_directory)

    if enable_chrome:
        closer = Event()
        noticer = Event()
        closer.clear()
        noticer.clear()
        reader, writer = Pipe()
        browser_daemon = multiprocessing.Process(target=run_browser,
                                                 args=(
                                                     closer,
                                                     noticer,
                                                     reader,
                                                 ))
        browser_daemon.daemon = True
        browser_daemon.start()

    def __inner_job():
        start = time.time()
        text_binary = analyze_current_screen_text(
            directory=data_directory,
            compress_level=image_compress_level[0],
            crop_area=crop_areas[game_type],
            use_monitor=use_monitor)
        keywords = get_text_from_image(image_data=text_binary, )
        if not keywords:
            print("text not recognized")
            return

        true_flag, real_question, question, answers = parse_question_and_answer(
            keywords)

        ## notice crawler to work
        # qwriter.send(real_question.strip("?"))
        # crawler_noticer.set()

        print('-' * 72)
        print(real_question)
        print('-' * 72)
        print("\n".join(answers))

        # notice browser
        if enable_chrome:
            writer.send(question)
            noticer.set()

        search_question = pre_process_question(question)
        summary = baidu_count(search_question, answers, timeout=timeout)
        summary_li = sorted(summary.items(),
                            key=operator.itemgetter(1),
                            reverse=True)
        data = [("选项", "同比")]
        for a, w in summary_li:
            data.append((a, w))
        table = AsciiTable(data)
        print(table.table)

        print("*" * 72)
        if true_flag:
            print("肯定回答(**): ", summary_li[0][0])
            print("否定回答(  ): ", summary_li[-1][0])
        else:
            print("肯定回答(  ): ", summary_li[0][0])
            print("否定回答(**): ", summary_li[-1][0])
        print("*" * 72)

        # try crawler
        # retry = 4
        # while retry:
        #     if result_noticer.is_set():
        #         print("~" * 60)
        #         print(stdreader.recv())
        #         print("~" * 60)
        #         break
        #     retry -= 1
        #     time.sleep(1)
        # result_noticer.clear()

        print("~" * 60)
        print(kwquery(real_question.strip("?")))
        print("~" * 60)

        end = time.time()
        print("use {0} 秒".format(end - start))
        save_screen(directory=data_directory)

    print("""
    请选择答题节目:
      1. 百万英雄
      2. 冲顶大会
    """)
    game_type = input("输入节目序号: ")
    if game_type == "1":
        game_type = '百万英雄'
    elif game_type == "2":
        game_type = '冲顶大会'
    else:
        game_type = '百万英雄'

    while True:
        print("""
    请在答题开始前就运行程序,
    答题开始的时候按Enter预测答案
                """)

        print("当前选择答题游戏: {}\n".format(game_type))

        enter = input("按Enter键开始,按ESC键退出...")
        if enter == chr(27):
            break
        try:
            __inner_job()
        except Exception as e:
            print(str(e))

        print("欢迎下次使用")

    if enable_chrome:
        reader.close()
        writer.close()
        closer.set()
        time.sleep(3)
Example No. 36
class Artifacts(object):
    max_preview_size_bytes = 65536

    _flush_frequency_sec = 300.
    # notice these two should match
    _save_format = '.csv.gz'
    _compression = 'gzip'
    # hashing constants
    _hash_block_size = 65536
    _pd_artifact_type = 'data-audit-table'

    class _ProxyDictWrite(dict):
        """ Dictionary wrapper that updates an arguments instance on any item set in the dictionary """
        def __init__(self, artifacts_manager, *args, **kwargs):
            super(Artifacts._ProxyDictWrite, self).__init__(*args, **kwargs)
            self._artifacts_manager = artifacts_manager
            # list of artifacts we should not upload (by name & weak-reference)
            self.artifact_metadata = {}
            # list of hash columns to calculate uniqueness for the artifacts
            self.artifact_hash_columns = {}

        def __setitem__(self, key, value):
            # check that value is of type pandas
            if pd and isinstance(value, pd.DataFrame):
                super(Artifacts._ProxyDictWrite, self).__setitem__(key, value)

                if self._artifacts_manager:
                    self._artifacts_manager.flush()
            else:
                raise ValueError(
                    'Artifacts currently support pandas.DataFrame objects only'
                )

        def unregister_artifact(self, name):
            self.artifact_metadata.pop(name, None)
            self.pop(name, None)

        def add_metadata(self, name, metadata):
            self.artifact_metadata[name] = deepcopy(metadata)

        def get_metadata(self, name):
            return self.artifact_metadata.get(name)

        def add_hash_columns(self, artifact_name, hash_columns):
            self.artifact_hash_columns[artifact_name] = hash_columns

        def get_hash_columns(self, artifact_name):
            return self.artifact_hash_columns.get(artifact_name)

    @property
    def registered_artifacts(self):
        # type: () -> Dict[str, Artifact]
        return self._artifacts_container

    @property
    def summary(self):
        # type: () -> str
        return self._summary

    def __init__(self, task):
        self._task = task
        # notice the double link, this is important since the Artifact
        # dictionary needs to signal the Artifacts object about changes
        self._artifacts_container = self._ProxyDictWrite(self)
        self._last_artifacts_upload = {}
        self._unregister_request = set()
        self._thread = None
        self._flush_event = Event()
        self._exit_flag = False
        self._summary = ''
        self._temp_folder = []
        self._task_artifact_list = []
        self._task_edit_lock = RLock()
        self._storage_prefix = None

    def register_artifact(self,
                          name,
                          artifact,
                          metadata=None,
                          uniqueness_columns=True):
        # type: (str, DataFrame, Optional[dict], Union[bool, Sequence[str]]) -> ()
        """
        :param str name: name of the artifact. Notice! it will override a previous artifact if the name already exists.
        :param pandas.DataFrame artifact: artifact object, supported artifacts object types: pandas.DataFrame
        :param dict metadata: dictionary of key value to store with the artifact (visible in the UI)
        :param list uniqueness_columns: list of columns for artifact uniqueness comparison criteria. The default value
            is True, which is equivalent to all the columns (same as artifact.columns).
        """
        # currently we support pandas.DataFrame (which we will upload as csv.gz)
        if name in self._artifacts_container:
            LoggerRoot.get_base_logger().info(
                'Register artifact, overwriting existing artifact \"{}\"'.
                format(name))
        self._artifacts_container.add_hash_columns(
            name,
            list(artifact.columns
                 if uniqueness_columns is True else uniqueness_columns))
        self._artifacts_container[name] = artifact
        if metadata:
            self._artifacts_container.add_metadata(name, metadata)

    def unregister_artifact(self, name):
        # type: (str) -> ()
        # Remove artifact from the watch list
        self._unregister_request.add(name)
        self.flush()

    def upload_artifact(self,
                        name,
                        artifact_object=None,
                        metadata=None,
                        preview=None,
                        delete_after_upload=False,
                        auto_pickle=True):
        # type: (str, Optional[object], Optional[dict], Optional[str], bool, bool) -> bool
        if not Session.check_min_api_version('2.3'):
            LoggerRoot.get_base_logger().warning(
                'Artifacts not supported by your TRAINS-server version, '
                'please upgrade to the latest server version')
            return False

        if name in self._artifacts_container:
            raise ValueError(
                "Artifact by the name of {} is already registered, use register_artifact"
                .format(name))

        # cast preview to string
        if preview:
            preview = str(preview)

        # convert string to object if it is a file/folder (don't try to serialize long texts)
        if isinstance(artifact_object,
                      six.string_types) and len(artifact_object) < 2048:
            # noinspection PyBroadException
            try:
                artifact_path = Path(artifact_object)
                if artifact_path.exists():
                    artifact_object = artifact_path
                elif '*' in artifact_object or '?' in artifact_object:
                    # hackish, detect wildcard in tr files
                    folder = Path('').joinpath(*artifact_path.parts[:-1])
                    if folder.is_dir() and folder.parts:
                        wildcard = artifact_path.parts[-1]
                        if list(Path(folder).rglob(wildcard)):
                            artifact_object = artifact_path
            except Exception:
                pass

        artifact_type_data = tasks.ArtifactTypeData()
        artifact_type_data.preview = ''
        override_filename_in_uri = None
        override_filename_ext_in_uri = None
        uri = None
        if np and isinstance(artifact_object, np.ndarray):
            artifact_type = 'numpy'
            artifact_type_data.content_type = 'application/numpy'
            artifact_type_data.preview = preview or str(
                artifact_object.__repr__())
            override_filename_ext_in_uri = '.npz'
            override_filename_in_uri = name + override_filename_ext_in_uri
            fd, local_filename = mkstemp(prefix=quote(name, safe="") + '.',
                                         suffix=override_filename_ext_in_uri)
            os.close(fd)
            np.savez_compressed(local_filename, **{name: artifact_object})
            delete_after_upload = True
        elif pd and isinstance(artifact_object, pd.DataFrame):
            artifact_type = 'pandas'
            artifact_type_data.content_type = 'text/csv'
            artifact_type_data.preview = preview or str(
                artifact_object.__repr__())
            override_filename_ext_in_uri = self._save_format
            override_filename_in_uri = name
            fd, local_filename = mkstemp(prefix=quote(name, safe="") + '.',
                                         suffix=override_filename_ext_in_uri)
            os.close(fd)
            artifact_object.to_csv(local_filename,
                                   compression=self._compression)
            delete_after_upload = True
        elif isinstance(artifact_object, Image.Image):
            artifact_type = 'image'
            artifact_type_data.content_type = 'image/png'
            desc = str(artifact_object.__repr__())
            artifact_type_data.preview = preview or desc[1:desc.find(' at ')]
            override_filename_ext_in_uri = '.png'
            override_filename_in_uri = name + override_filename_ext_in_uri
            fd, local_filename = mkstemp(prefix=quote(name, safe="") + '.',
                                         suffix=override_filename_ext_in_uri)
            os.close(fd)
            artifact_object.save(local_filename)
            delete_after_upload = True
        elif isinstance(artifact_object, dict):
            artifact_type = 'JSON'
            artifact_type_data.content_type = 'application/json'
            preview = preview or json.dumps(
                artifact_object, sort_keys=True, indent=4)
            override_filename_ext_in_uri = '.json'
            override_filename_in_uri = name + override_filename_ext_in_uri
            fd, local_filename = mkstemp(prefix=quote(name, safe="") + '.',
                                         suffix=override_filename_ext_in_uri)
            os.write(fd, bytes(preview.encode()))
            os.close(fd)
            if len(preview) < self.max_preview_size_bytes:
                artifact_type_data.preview = preview
            else:
                artifact_type_data.preview = '# full json too large to store, storing first {}kb\n{}'.format(
                    self.max_preview_size_bytes // 1024,
                    preview[:self.max_preview_size_bytes])

            delete_after_upload = True
        elif isinstance(artifact_object, (
                Path,
                pathlib_Path,
        ) if pathlib_Path is not None else (Path, )):
            # check if single file
            artifact_object = Path(artifact_object)

            artifact_object.expanduser().absolute()
            # noinspection PyBroadException
            try:
                create_zip_file = not artifact_object.is_file()
            except Exception:  # Hack for windows pathlib2 bug, is_file isn't valid.
                create_zip_file = True
            else:  # We assume that this is not Windows os
                if artifact_object.is_dir():
                    # change to wildcard
                    artifact_object /= '*'

            if create_zip_file:
                folder = Path('').joinpath(*artifact_object.parts[:-1])
                if not folder.is_dir() or not folder.parts:
                    raise ValueError(
                        "Artifact file/folder '{}' could not be found".format(
                            artifact_object.as_posix()))

                wildcard = artifact_object.parts[-1]
                files = list(Path(folder).rglob(wildcard))
                override_filename_ext_in_uri = '.zip'
                override_filename_in_uri = folder.parts[
                    -1] + override_filename_ext_in_uri
                fd, zip_file = mkstemp(
                    prefix=quote(folder.parts[-1], safe="") + '.',
                    suffix=override_filename_ext_in_uri)
                try:
                    artifact_type_data.content_type = 'application/zip'
                    archive_preview = 'Archive content {}:\n'.format(
                        artifact_object.as_posix())

                    with ZipFile(zip_file,
                                 'w',
                                 allowZip64=True,
                                 compression=ZIP_DEFLATED) as zf:
                        for filename in sorted(files):
                            if filename.is_file():
                                relative_file_name = filename.relative_to(
                                    folder).as_posix()
                                archive_preview += '{} - {}\n'.format(
                                    relative_file_name,
                                    humanfriendly.format_size(
                                        filename.stat().st_size))
                                zf.write(filename.as_posix(),
                                         arcname=relative_file_name)
                except Exception as e:
                    # failed uploading folder:
                    LoggerRoot.get_base_logger().warning(
                        'Exception {}\nFailed zipping artifact folder {}'.
                        format(e, folder))
                    return False
                finally:
                    os.close(fd)
                artifact_type_data.preview = preview or archive_preview
                artifact_object = zip_file
                artifact_type = 'archive'
                artifact_type_data.content_type = mimetypes.guess_type(
                    artifact_object)[0]
                local_filename = artifact_object
                delete_after_upload = True
            else:
                if not artifact_object.is_file():
                    raise ValueError(
                        "Artifact file '{}' could not be found".format(
                            artifact_object.as_posix()))

                override_filename_in_uri = artifact_object.parts[-1]
                artifact_type_data.preview = preview or '{} - {}\n'.format(
                    artifact_object,
                    humanfriendly.format_size(artifact_object.stat().st_size))
                artifact_object = artifact_object.as_posix()
                artifact_type = 'custom'
                artifact_type_data.content_type = mimetypes.guess_type(
                    artifact_object)[0]
                local_filename = artifact_object
        elif (isinstance(artifact_object, six.string_types)
              and len(artifact_object) < 4096
              and urlparse(artifact_object).scheme in remote_driver_schemes):
            # we should not upload this, just register
            local_filename = None
            uri = artifact_object
            artifact_type = 'custom'
            artifact_type_data.content_type = mimetypes.guess_type(
                artifact_object)[0]
        elif isinstance(artifact_object, six.string_types):
            # if we got here, we should store it as text file.
            artifact_type = 'string'
            artifact_type_data.content_type = 'text/plain'
            if preview:
                artifact_type_data.preview = preview
            elif len(artifact_object) < self.max_preview_size_bytes:
                artifact_type_data.preview = artifact_object
            else:
                artifact_type_data.preview = '# full text too large to store, storing first {}kb\n{}'.format(
                    self.max_preview_size_bytes // 1024,
                    artifact_object[:self.max_preview_size_bytes])
            delete_after_upload = True
            override_filename_ext_in_uri = '.txt'
            override_filename_in_uri = name + override_filename_ext_in_uri
            fd, local_filename = mkstemp(prefix=quote(name, safe="") + '.',
                                         suffix=override_filename_ext_in_uri)
            os.close(fd)
            # noinspection PyBroadException
            try:
                with open(local_filename, 'wt') as f:
                    f.write(artifact_object)
            except Exception:
                # cleanup and raise exception
                os.unlink(local_filename)
                raise
        elif auto_pickle:
            # if we are here it means we do not know what to do with the object, so we serialize it with pickle.
            artifact_type = 'pickle'
            artifact_type_data.content_type = 'application/pickle'
            # noinspection PyBroadException
            try:
                artifact_type_data.preview = preview or str(
                    artifact_object.__repr__())[:self.max_preview_size_bytes]
            except Exception:
                artifact_type_data.preview = preview or ''
            delete_after_upload = True
            override_filename_ext_in_uri = '.pkl'
            override_filename_in_uri = name + override_filename_ext_in_uri
            fd, local_filename = mkstemp(prefix=quote(name, safe="") + '.',
                                         suffix=override_filename_ext_in_uri)
            os.close(fd)
            # noinspection PyBroadException
            try:
                with open(local_filename, 'wb') as f:
                    pickle.dump(artifact_object, f)
            except Exception:
                # cleanup and raise exception
                os.unlink(local_filename)
                raise
        else:
            raise ValueError("Artifact type {} not supported".format(
                type(artifact_object)))

        # remove from existing list, if exists
        for artifact in self._task_artifact_list:
            if artifact.key == name:
                if artifact.type == self._pd_artifact_type:
                    raise ValueError(
                        "Artifact of name {} already registered, "
                        "use register_artifact instead".format(name))

                self._task_artifact_list.remove(artifact)
                break

        if not local_filename:
            file_size = None
            file_hash = None
        else:
            # check that the file to upload exists
            local_filename = Path(local_filename).absolute()
            if not local_filename.exists() or not local_filename.is_file():
                LoggerRoot.get_base_logger().warning(
                    'Artifact upload failed, cannot find file {}'.format(
                        local_filename.as_posix()))
                return False

            file_hash, _ = self.sha256sum(local_filename.as_posix())
            file_size = local_filename.stat().st_size

            uri = self._upload_local_file(
                local_filename,
                name,
                delete_after_upload=delete_after_upload,
                override_filename=override_filename_in_uri,
                override_filename_ext=override_filename_ext_in_uri)

        timestamp = int(time())

        artifact = tasks.Artifact(
            key=name,
            type=artifact_type,
            uri=uri,
            content_size=file_size,
            hash=file_hash,
            timestamp=timestamp,
            type_data=artifact_type_data,
            display_data=[(str(k), str(v))
                          for k, v in metadata.items()] if metadata else None)

        # update task artifacts
        with self._task_edit_lock:
            self._task_artifact_list.append(artifact)
            self._task.set_artifacts(self._task_artifact_list)

        return True

    def flush(self):
        # type: () -> ()
        # start the thread if it hasn't already:
        self._start()
        # flush the current state of all artifacts
        self._flush_event.set()

    def stop(self, wait=True):
        # type: (bool) -> ()
        # stop the daemon thread and quit
        # wait until thread exists
        self._exit_flag = True
        self._flush_event.set()
        if wait:
            if self._thread:
                self._thread.join()
            # remove all temp folders
            for f in self._temp_folder:
                # noinspection PyBroadException
                try:
                    Path(f).rmdir()
                except Exception:
                    pass

    def _start(self):
        # type: () -> ()
        """ Start daemon thread if any artifacts are registered and thread is not up yet """
        if not self._thread and self._artifacts_container:
            # start the daemon thread
            self._flush_event.clear()
            self._thread = Thread(target=self._daemon)
            self._thread.daemon = True
            self._thread.start()

    def _daemon(self):
        # type: () -> ()
        while not self._exit_flag:
            self._flush_event.wait(self._flush_frequency_sec)
            self._flush_event.clear()
            artifact_keys = list(self._artifacts_container.keys())
            for name in artifact_keys:
                try:
                    self._upload_data_audit_artifacts(name)
                except Exception as e:
                    LoggerRoot.get_base_logger().warning(str(e))

        # create summary
        self._summary = self._get_statistics()

    def _upload_data_audit_artifacts(self, name):
        # type: (str) -> ()
        logger = self._task.get_logger()
        pd_artifact = self._artifacts_container.get(name)
        pd_metadata = self._artifacts_container.get_metadata(name)

        # remove from artifacts watch list
        if name in self._unregister_request:
            try:
                self._unregister_request.remove(name)
            except KeyError:
                pass
            self._artifacts_container.unregister_artifact(name)

        if pd_artifact is None:
            return

        override_filename_ext_in_uri = self._save_format
        override_filename_in_uri = name
        fd, local_csv = mkstemp(prefix=quote(name, safe="") + '.',
                                suffix=override_filename_ext_in_uri)
        os.close(fd)
        local_csv = Path(local_csv)
        pd_artifact.to_csv(local_csv.as_posix(),
                           index=False,
                           compression=self._compression)
        current_sha2, file_sha2 = self.sha256sum(local_csv.as_posix(),
                                                 skip_header=32)
        if name in self._last_artifacts_upload:
            previous_sha2 = self._last_artifacts_upload[name]
            if previous_sha2 == current_sha2:
                # nothing to do, we can skip the upload
                # noinspection PyBroadException
                try:
                    local_csv.unlink()
                except Exception:
                    pass
                return
        self._last_artifacts_upload[name] = current_sha2

        # If old trains-server, upload as debug image
        if not Session.check_min_api_version('2.3'):
            logger.report_image(title='artifacts',
                                series=name,
                                local_path=local_csv.as_posix(),
                                delete_after_upload=True,
                                iteration=self._task.get_last_iteration(),
                                max_image_history=2)
            return

        # Find our artifact
        artifact = None
        for an_artifact in self._task_artifact_list:
            if an_artifact.key == name:
                artifact = an_artifact
                break

        file_size = local_csv.stat().st_size

        # upload file
        uri = self._upload_local_file(
            local_csv,
            name,
            delete_after_upload=True,
            override_filename=override_filename_in_uri,
            override_filename_ext=override_filename_ext_in_uri)

        # update task artifacts
        with self._task_edit_lock:
            if not artifact:
                artifact = tasks.Artifact(key=name,
                                          type=self._pd_artifact_type)
                self._task_artifact_list.append(artifact)
            artifact_type_data = tasks.ArtifactTypeData()

            artifact_type_data.data_hash = current_sha2
            artifact_type_data.content_type = "text/csv"
            artifact_type_data.preview = str(
                pd_artifact.__repr__()) + '\n\n' + self._get_statistics(
                    {name: pd_artifact})

            artifact.type_data = artifact_type_data
            artifact.uri = uri
            artifact.content_size = file_size
            artifact.hash = file_sha2
            artifact.timestamp = int(time())
            artifact.display_data = [
                (str(k), str(v)) for k, v in pd_metadata.items()
            ] if pd_metadata else None

            self._task.set_artifacts(self._task_artifact_list)

    def _upload_local_file(self,
                           local_file,
                           name,
                           delete_after_upload=False,
                           override_filename=None,
                           override_filename_ext=None):
        # type: (str, str, bool, Optional[str], Optional[str]) -> str
        """
        Upload local file and return uri of the uploaded file (uploading in the background)
        """
        upload_uri = (self._task.output_uri
                      or self._task.get_logger().get_default_upload_destination())
        if not isinstance(local_file, Path):
            local_file = Path(local_file)
        ev = UploadEvent(
            metric='artifacts',
            variant=name,
            image_data=None,
            upload_uri=upload_uri,
            local_image_path=local_file.as_posix(),
            delete_after_upload=delete_after_upload,
            override_filename=override_filename,
            override_filename_ext=override_filename_ext,
            override_storage_key_prefix=self._get_storage_uri_prefix())
        _, uri = ev.get_target_full_upload_uri(upload_uri)

        # send for upload
        # noinspection PyProtectedMember
        self._task.reporter._report(ev)

        return uri

    def _get_statistics(self, artifacts_dict=None):
        # type: (Optional[Dict[str, Artifact]]) -> str
        summary = ''
        artifacts_dict = artifacts_dict or self._artifacts_container
        thread_pool = ThreadPool()

        try:
            # build hash row sets
            artifacts_summary = []
            for a_name, a_df in artifacts_dict.items():
                hash_cols = self._artifacts_container.get_hash_columns(a_name)
                if not pd or not isinstance(a_df, pd.DataFrame):
                    continue

                if hash_cols is True:
                    hash_col_drop = []
                else:
                    hash_cols = set(hash_cols)
                    missing_cols = hash_cols.difference(a_df.columns)
                    if missing_cols == hash_cols:
                        LoggerRoot.get_base_logger().warning(
                            'Uniqueness columns {} not found in artifact {}. '
                            'Skipping uniqueness check for artifact.'.format(
                                list(missing_cols), a_name))
                        continue
                    elif missing_cols:
                        # missing_cols must be a subset of hash_cols
                        hash_cols.difference_update(missing_cols)
                        LoggerRoot.get_base_logger().warning(
                            'Uniqueness columns {} not found in artifact {}. Using {}.'
                            .format(list(missing_cols), a_name,
                                    list(hash_cols)))

                    hash_col_drop = [
                        col for col in a_df.columns if col not in hash_cols
                    ]

                a_unique_hash = set()

                def hash_row(r):
                    a_unique_hash.add(hash(bytes(r)))

                a_shape = a_df.shape
                # parallelize
                a_hash_cols = a_df.drop(columns=hash_col_drop)
                thread_pool.map(hash_row, a_hash_cols.values)
                # add result
                artifacts_summary.append((
                    a_name,
                    a_shape,
                    a_unique_hash,
                ))

            # build intersection summary
            for i, (name, shape, unique_hash) in enumerate(artifacts_summary):
                summary += '[{name}]: shape={shape}, {unique} unique rows, {percentage:.1f}% uniqueness\n'.format(
                    name=name,
                    shape=shape,
                    unique=len(unique_hash),
                    percentage=100 * len(unique_hash) / float(shape[0]))
                for name2, shape2, unique_hash2 in artifacts_summary[i + 1:]:
                    intersection = len(unique_hash & unique_hash2)
                    summary += '\tIntersection with [{name2}] {intersection} rows: {percentage:.1f}%\n'.format(
                        name2=name2,
                        intersection=intersection,
                        percentage=100 * intersection /
                        float(len(unique_hash2)))
        except Exception as e:
            LoggerRoot.get_base_logger().warning(str(e))
        finally:
            thread_pool.close()
            thread_pool.terminate()
        return summary

    def _get_temp_folder(self, force_new=False):
        # type: (bool) -> str
        if force_new or not self._temp_folder:
            new_temp = mkdtemp(prefix='artifacts_')
            self._temp_folder.append(new_temp)
            return new_temp
        return self._temp_folder[0]

    def _get_storage_uri_prefix(self):
        # type: () -> str
        if not self._storage_prefix:
            # noinspection PyProtectedMember
            self._storage_prefix = self._task._get_output_destination_suffix()
        return self._storage_prefix

    @staticmethod
    def sha256sum(filename, skip_header=0):
        # type: (str, int) -> (Optional[str], Optional[str])
        # create sha2 of the file, notice we skip the header of the file (32 bytes)
        # because sometimes that is the only change
        h = hashlib.sha256()
        file_hash = hashlib.sha256()
        b = bytearray(Artifacts._hash_block_size)
        mv = memoryview(b)
        try:
            with open(filename, 'rb', buffering=0) as f:
                # skip header
                if skip_header:
                    file_hash.update(f.read(skip_header))
                # noinspection PyUnresolvedReferences
                for n in iter(lambda: f.readinto(mv), 0):
                    h.update(mv[:n])
                    if skip_header:
                        file_hash.update(mv[:n])
        except Exception as e:
            LoggerRoot.get_base_logger().warning(str(e))
            return None, None

        return h.hexdigest(), file_hash.hexdigest() if skip_header else None
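
# A minimal, self-contained sketch of the change-detection idea behind
# sha256sum() above: hash the file once while skipping a fixed-size header
# that may legitimately change on every rewrite, and once over the full
# content. The function name and parameters below are illustrative only.
import hashlib


def hashes_for_change_detection(path, skip_header=32, block_size=65536):
    content_hash = hashlib.sha256()  # ignores the first `skip_header` bytes
    full_hash = hashlib.sha256()     # covers the whole file
    with open(path, 'rb') as f:
        full_hash.update(f.read(skip_header))
        for chunk in iter(lambda: f.read(block_size), b''):
            content_hash.update(chunk)
            full_hash.update(chunk)
    return content_hash.hexdigest(), full_hash.hexdigest()

# Typical use: re-upload only when the header-skipping hash changes, exactly
# as _upload_data_audit_artifacts() compares current_sha2 to previous_sha2.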
pairList = []
pairBinanceNameMapping = dict()
# pairList = ['ethbtc@depth20', 'ltcbtc@depth20']

# Generate pair list
for pair in config['exchange']['symbols']:
    pairSplit = pair.lower().split('/')
    if pairSplit[0] == "bch":
        pairSplit[0] = "bchabc"

    pairElement = pairSplit[0] + pairSplit[1] + '@depth' + str(orderbookDepth)
    pairList.append(pairElement)
    pairBinanceNameMapping[pairElement] = pair

# Keep on restarting the BinanceSocketManager process periodically
while True:
    stopProcessesEvent = Event()
    process = Process(target=CryptoArbBinanceOrderBookProcess,
                      args=(stopProcessesEvent, ))

    process.daemon = True
    process.start()

    time.sleep(restartPeriodSeconds)

    stopProcessesEvent.set()
    process.join()

logger.info("binance-listener exited normally. Bye.")
Exemplo n.º 38
0
    parser.add_argument("--model", type=str, required=True)
    parser.add_argument("--threshold", type=float, default=0.9)
    parser.add_argument("--robot_ip", type=str, required=True)
    args = parser.parse_args()

    model_file = args.model
    openpose_dir = args.openpose_dir
    if not os.path.isdir(openpose_dir):
        raise NotADirectoryError('openpose_dir must point to a directory')

    skeleton_q = Queue(maxsize=1)
    gesture_q = Queue(maxsize=1)
    stop = Event()

    t1 = Process(target=skeleton_loop, args=(skeleton_q, stop, openpose_dir))
    t1.start()
    t2 = Process(target=gesture_recognition_loop,
                 args=(skeleton_q, gesture_q, stop, args.threshold,
                       model_file))
    t2.start()

    display_image(stop, gesture_q, args.robot_ip)
    stop.set()

    # clear items in the queue to allow joining process
    clear_queue(skeleton_q)
    clear_queue(gesture_q)

    t1.join()
    t2.join()
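
# clear_queue() is referenced above but not shown in this excerpt. Draining a
# multiprocessing.Queue matters because a child process that still has items
# buffered in the queue's feeder thread may block on exit, which would make
# join() hang. A minimal sketch of such a helper (assumed name):
import queue


def clear_queue(q):
    """Drain any remaining items so the producing processes can be joined."""
    try:
        while True:
            q.get_nowait()
    except queue.Empty:
        pass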
Exemplo n.º 39
0
class TopologyPredictionDaemon(Process):
    """
    Receives updated order-book pairs, runs them through the topology, and
    returns the available transaction specifications.
    """

    api = None  # type: UnsterblichContractClient

    logger = None
    is_running = None  # type: bool
    topology = None  # type: Topology

    pika_conn = None
    pika_channel = None

    base = None  # type: None
    balance = None  # type: None

    exit = None  # type: Event

    min_allow = None
    max_allow = None

    def __init__(self, topology: Topology, base, balance, min_allow,
                 max_allow):
        Process.__init__(self)  # must actually run as a concurrent thread.
        self.exit = Event()
        self.base = base
        self.balance = balance

        self.is_running = True
        self.topology = topology
        self.topology.wallet.set(self.base, self.balance)

        self.min_allow = min_allow
        self.max_allow = max_allow

    def __init_process(self):

        self.pika_conn = create_pika_connection()
        self.pika_channel = self.pika_conn.channel()
        self.pika_channel.exchange_declare(PIKA_EXCHANGE,
                                           exchange_type=PIKA_EXCHANGE_TYPE)

        result = self.pika_channel.queue_declare('',
                                                 exclusive=True,
                                                 auto_delete=True)
        queue_name = result.method.queue  # create unique queue!

        self.pika_channel.queue_bind(exchange=PIKA_EXCHANGE,
                                     queue=queue_name,
                                     routing_key="*")
        self.pika_channel.basic_consume(queue=queue_name,
                                        on_message_callback=self.pika_callback,
                                        auto_ack=True)

    def pika_callback(self, ch, method, properties, body):
        market = method.routing_key
        self.orderbook_updated(market)

    def orderbook_updated(self, market: str):

        if market not in self.topology.markets:
            return

        else:
            self.logger.debug("received / refresh %s" % market)

            # avails = self.topology.update_and_verify(market)
            avails = self.topology.update_and_verify()
            for avail in avails:  # Transaction, Profit

                transaction = avail[0]
                profit = avail[1]

                self.logger.info("AVAIL %s = %s" % (transaction, profit))
                process_flag = (profit >= self.min_allow) and (profit <=
                                                               self.max_allow)

                self.logger.info("AVAIL Process F = %s" % process_flag)

                if process_flag:
                    self.logger.info("Send AVAIL to service!")
                    self.api.contract_chained_transactions(
                        maximum_balance=self.balance,
                        transactions=transaction.serialize(),
                        profit=profit)

    def shutdown(self):
        self.is_running = False
        self.exit.set()

    def run(self):
        self.is_running = True
        self.api = UnsterblichContractClient()
        self.logger = create_logger("TopologyDaemon_(%s)" % os.getpid())

        self.__init_process()
        self.logger.info("%s개 토폴로지 로드." % len(self.topology))
        self.logger.info("%s / %s" % (self.base, self.balance))

        while self.is_running:
            try:
                self.pika_channel.start_consuming()
            except Exception as e:
                if not self.is_running: return
                self.logger.warning("exception raised! %s" % e)
                self.__init_process()
            if not self.is_running: return
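
# Note: plain attributes changed in the parent (e.g. setting is_running to
# False) are not visible inside the child process; only shared primitives
# such as the Event propagate. A minimal, self-contained sketch of stopping a
# Process loop through the Event alone (pattern illustration, not the daemon
# above):
from multiprocessing import Event, Process


class StoppableDaemon(Process):
    def __init__(self):
        super().__init__()
        self.exit = Event()

    def run(self):
        while not self.exit.is_set():
            self.exit.wait(0.5)  # stand-in for one unit of work per loop

    def shutdown(self):
        self.exit.set()


if __name__ == '__main__':
    daemon = StoppableDaemon()
    daemon.start()
    daemon.shutdown()
    daemon.join()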
Exemplo n.º 40
0
        self.event = event

    def run(self):
        """
        Waits for the event and stores the result
        in shared memory
        :return: None
        """
        self.event.wait()
        summe = 0
        for i in range(self.v.value + 1):
            summe += i
        self.v.value = summe


if __name__ == "__main__":
    v = Value('i', 0)
    e = Event()
    # start the process
    calculator = CalculatorProcess(v, e)
    calculator.start()
    n = int(
        input(
            "Bis zu welcher Zahl möchten Sie die Summe von 1 bis n berechnen?")
    )
    v.value = n
    # signal the process as soon as the user has entered the number
    e.set()
    calculator.join()
    print("Das ergebnis ist " + str(v.value))
Exemplo n.º 41
0
class Pool():
    """Distributes tasks to a number of worker processes.
  New tasks can be added dynamically even after the workers have been started.
  Requirement: Tasks can only be added from the parent process, e.g. while
  consuming the results generator."""

    # Factor to calculate the maximum number of items in the work/done queue.
    # Necessary to not overflow the queue's pipe if a keyboard interrupt happens.
    BUFFER_FACTOR = 4

    def __init__(self, num_workers, heartbeat_timeout=30):
        self.num_workers = num_workers
        self.processes = []
        self.terminated = False

        # Invariant: processing_count >= #work_queue + #done_queue. It is greater
        # when a worker takes an item from the work_queue and before the result is
        # submitted to the done_queue. It is equal when no worker is working,
        # e.g. when all workers have finished, and when no results are processed.
        # Count is only accessed by the parent process. Only the parent process is
        # allowed to remove items from the done_queue and to add items to the
        # work_queue.
        self.processing_count = 0
        self.heartbeat_timeout = heartbeat_timeout

        # Disable sigint to make multiprocessing data structure inherit it and
        # ignore ctrl-c
        with without_sigint():
            self.work_queue = Queue()
            self.done_queue = Queue()
            self.pause_event = Event()
            self.read_again_event = Event()

    def imap_unordered(self,
                       fn,
                       gen,
                       process_context_fn=None,
                       process_context_args=None):
        """Maps function "fn" to items in generator "gen" on the worker processes
    in an arbitrary order. The items are expected to be lists of arguments to
    the function. Returns a results iterator. A result value of type
    MaybeResult either indicates a heartbeat of the runner, i.e. indicating
    that the runner is still waiting for the result to be computed, or it wraps
    the real result.

    Args:
      process_context_fn: Function executed once by each worker. Expected to
          return a process-context object. If present, this object is passed
          as additional argument to each call to fn.
      process_context_args: List of arguments for the invocation of
          process_context_fn. All arguments will be pickled and sent beyond the
          process boundary.
    """
        if self.terminated:
            return
        try:
            internal_error = False
            gen = iter(gen)
            self.advance = self._advance_more

            # Disable sigint to make workers inherit it and ignore ctrl-c
            with without_sigint():
                for w in xrange(self.num_workers):
                    p = Process(target=Worker,
                                args=(fn, self.work_queue, self.done_queue,
                                      self.pause_event, self.read_again_event,
                                      process_context_fn,
                                      process_context_args))
                    p.start()
                    self.processes.append(p)

            self.advance(gen)
            while self.processing_count > 0:
                while True:
                    try:
                        result = self._get_result_from_queue()
                    except:
                        # TODO(machenbach): Handle a few known types of internal errors
                        # gracefully, e.g. missing test files.
                        internal_error = True
                        continue
                    yield result
                    break

                self.advance(gen)
        except KeyboardInterrupt:
            raise
        except Exception as e:
            traceback.print_exc()
            print(">>> EXCEPTION: %s" % e)
        finally:
            self.terminate()

        if internal_error:
            raise Exception("Internal error in a worker process.")

    def _advance_more(self, gen):
        while self.processing_count < self.num_workers * self.BUFFER_FACTOR:
            try:
                self.work_queue.put(gen.next())
                self.processing_count += 1
            except StopIteration:
                self.advance = self._advance_empty
                break

    def _advance_empty(self, gen):
        pass

    def add(self, args):
        """Adds an item to the work queue. Can be called dynamically while
    processing the results from imap_unordered."""
        assert not self.terminated

        self.work_queue.put(args)
        self.processing_count += 1

    def terminate(self):
        """Terminates execution and waits for ongoing jobs."""
        # Iterate over the results, but ignore them
        list(self.terminate_with_results())

    def terminate_with_results(self):
        """Terminates execution and waits for ongoing jobs. It's a generator
    returning heartbeats and results for all jobs that started before calling
    terminate.
    """
        if self.terminated:
            return
        self.terminated = True

        self.pause_event.set()

        # Drain out work queue from tests
        try:
            while self.processing_count > 0:
                self.work_queue.get(True, 1)
                self.processing_count -= 1
        except Empty:
            pass

        # Make sure all processes stop
        for _ in self.processes:
            # During normal tear down the workers block on get(). Feed a poison pill
            # per worker to make them stop.
            self.work_queue.put("STOP")

        # Workers stop reading the work queue while the stop event is set, so they
        # do not overtake the main process that drains the queue. They should read
        # again to consume the poison pill and possibly more tests that we couldn't
        # get during draining.
        self.read_again_event.set()

        # Wait for results
        while self.processing_count:
            result = self._get_result_from_queue()
            if result.heartbeat or result.value:
                yield result

        for p in self.processes:
            p.join()

    def _get_result_from_queue(self):
        try:
            result = self.done_queue.get(timeout=self.heartbeat_timeout)
            self.processing_count -= 1
        except Empty:
            return MaybeResult.create_heartbeat()

        if result.exception:
            raise result.exception

        return MaybeResult.create_result(result.result)
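
# A self-contained sketch of the poison-pill shutdown used by
# terminate_with_results() above: workers block on work_queue.get(), so the
# parent feeds one sentinel per worker to wake them up and let them exit
# (illustration of the pattern only, not the Worker used by this Pool):
from multiprocessing import Process, Queue

STOP_SENTINEL = "STOP"


def sketch_worker(work_queue, done_queue):
    for item in iter(work_queue.get, STOP_SENTINEL):
        done_queue.put(item * 2)


if __name__ == '__main__':
    work, done = Queue(), Queue()
    workers = [Process(target=sketch_worker, args=(work, done))
               for _ in range(2)]
    for w in workers:
        w.start()
    for item in (1, 2, 3):
        work.put(item)
    for _ in workers:        # one poison pill per worker
        work.put(STOP_SENTINEL)
    results = [done.get() for _ in range(3)]
    for w in workers:
        w.join()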
Exemplo n.º 42
0
e_kill.clear()

# read socket settings from config
config = ConfigParser.SafeConfigParser()
config.read('default.cfg')

device = "udp:localhost:" + str(config.getint("SocketOut", "PORT_UDP_TUNER"))

p_linkin = Process(target=link.input, args=(
    q_plot,
    e_pause,
    e_kill,
    device,
))
p_linkin.start()
e_pause.set()
#}}}

SCOPE_LEN = 200  # number of points on the screen

import fir
taps = fir.get_taps()
print taps

app = QtGui.QApplication([])
win = pg.GraphicsWindow(title="Basic plotting examples")
# win.resize(1024,600)

p = win.addPlot(title="Updating p")

curve_raw = p.plot(pen='w')
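
# link.input, which feeds q_plot above, is not shown in this excerpt. A
# minimal sketch of a producer honouring the two events (assuming e_pause
# means "run while set" and e_kill means "exit"; read_sample is a
# hypothetical stand-in for the actual device read):
def link_input_sketch(q_plot, e_pause, e_kill, device):
    while not e_kill.is_set():
        e_pause.wait()                # block while paused
        if e_kill.is_set():
            break
        sample = read_sample(device)  # hypothetical device read
        q_plot.put(sample)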
Exemplo n.º 43
0
    def _run_tests(self, tests, **kwargs):
        # tests = dict where the key is a test group name and the value are
        # the tests to run
        tests_queue = Queue()
        results_queue = Queue()
        stop_event = Event()

        pending_tests = {}
        pending_not_thread_safe_tests = {}
        completed_tests = {}
        failures = 0
        errors = 0

        start_time = time.time()
        # First run tests which are not thread safe in the main process
        for group in self._not_thread_safe:
            if group not in tests.keys():
                continue

            group_tests = tests[group]
            del tests[group]

            logger.info('Running tests in the main process: %s' % (group_tests))
            pending_not_thread_safe_tests[group] = group_tests
            result = self._tests_func(tests=group_tests, worker_index=None)
            results_queue.put((group, result), block=False)

        for group, tests in tests.iteritems():
            tests_queue.put((group, tests), block=False)
            pending_tests[group] = tests

        worker_count = self._worker_count
        if worker_count == 'auto':
            worker_count = len(pending_tests)
        elif worker_count == 'cpu':
            worker_count = multiprocessing.cpu_count()

        if worker_count > len(pending_tests):
            # No need to spawn more workers than there are tests.
            worker_count = len(pending_tests)

        worker_max = self._worker_max
        if worker_max == 'auto':
            worker_max = len(pending_tests)
        elif worker_max == 'cpu':
            worker_max = multiprocessing.cpu_count()

        if worker_count > worker_max:
            # Don't spawn more workers than the configured maximum.
            worker_count = worker_max

        worker_args = (tests_queue, results_queue, stop_event)
        logger.info("Number of workers %s " % worker_count)
        workers = self._create_worker_pool(pool_size=worker_count,
                                           target_func=self._run_tests_worker,
                                           worker_args=worker_args)

        for index, worker in enumerate(workers):
            logger.info('Starting worker %s' % (index))
            worker.start()

        if workers:
            while pending_tests:
                try:
                    try:
                        group, result = results_queue.get(
                            timeout=self._parent_timeout, block=True)
                    except Exception:
                        raise Empty

                    try:
                        if group not in pending_not_thread_safe_tests:
                            pending_tests.pop(group)
                        else:
                            pending_not_thread_safe_tests.pop(group)
                    except KeyError:
                        logger.info('Got a result for unknown group: %s' %
                                    (group))
                    else:
                        completed_tests[group] = result
                        self._print_result(result)

                        if result.failures or result.errors:
                            failures += len(result.failures)
                            errors += len(result.errors)

                            if self.failfast:
                                # failfast is enabled, kill all the active workers
                                # and stop
                                for worker in workers:
                                    if worker.is_alive():
                                        worker.terminate()
                                break
                except Empty:
                    worker_left = False

                    for worker in workers:
                        if worker.is_alive():
                            worker_left = True
                            break

                    if not worker_left:
                        break

        # We are done, signalize all the workers to stop
        stop_event.set()

        end_time = time.time()
        self._exit(start_time, end_time, failures, errors)
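
# _run_tests_worker is referenced above but not shown in this excerpt. A
# minimal sketch of a worker loop that drains the tests queue and honours the
# stop event (hypothetical shape; a real worker would execute the tests
# instead of echoing the group back):
from queue import Empty


def run_tests_worker_sketch(tests_queue, results_queue, stop_event):
    while not stop_event.is_set():
        try:
            group, group_tests = tests_queue.get(timeout=1)
        except Empty:
            break
        results_queue.put((group, group_tests), block=False)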
Exemplo n.º 44
0
class PlantDraw(object):
    def __init__(self, plant, refresh_period=(1.0/240),
                 name='PlantDraw', *args, **kwargs):
        super(PlantDraw, self).__init__()
        self.name = name
        self.plant = plant
        self.drawing_thread = None
        self.polling_thread = None

        self.dt = refresh_period
        self.exec_time = time()
        self.scale = 150  # pixels per meter

        self.center_x = 0
        self.center_y = 0
        self.running = Event()

        self.polling_pipe, self.drawing_pipe = Pipe()

    def init_ui(self):
        self.fig = plt.figure(self.name)
        self.ax = plt.gca()
        self.ax.set_xlim([-1.5, 1.5])
        self.ax.set_ylim([-1.5, 1.5])
        self.ax.set_aspect('equal', 'datalim')
        self.ax.grid(True)
        self.fig.canvas.draw()
        self.bg = self.fig.canvas.copy_from_bbox(self.ax.bbox)
        self.cursor = Cursor(self.ax, useblit=True, color='red', linewidth=2)
        self.init_artists()
        #plt.ion()
        plt.show(False)

    def drawing_loop(self, drawing_pipe):
        # start the matplotlib plotting
        self.init_ui()

        while self.running.is_set():
            exec_time = time()
            # get any data from the polling loop
            updts = None
            while drawing_pipe.poll():
                data_from_plant = drawing_pipe.recv()
                if data_from_plant is None:
                    self.running.clear()
                    break

                # get the visualization updates from the latest state
                state, t = data_from_plant
                updts = self.update(state, t)
                self.update_canvas(updts)

            # sleep to guarantee the desired frame rate
            exec_time = time() - exec_time
            plt.waitforbuttonpress(max(self.dt-exec_time, 1e-9))
        self.close()

    def close(self):
        # close the matplotlib windows, clean up
        #plt.ioff()
        plt.close(self.fig)

    def update(self, *args, **kwargs):
        updts = self._update(*args, **kwargs)
        self.update_canvas(updts)

    def _update(self, *args, **kwargs):
        msg = "You need to implement the self._update() method in your\
 PlantDraw class."
        raise NotImplementedError(msg)

    def init_artists(self, *args, **kwargs):
        msg = "You need to implement the self.init_artists() method in your\
 PlantDraw class."
        raise NotImplementedError(msg)

    def update_canvas(self, updts):
        if updts is not None:
            # update the drawing from the plant state
            self.fig.canvas.restore_region(self.bg)
            for artist in updts:
                self.ax.draw_artist(artist)
            self.fig.canvas.update()
            # sleep to guarantee the desired frame rate
            exec_time = time() - self.exec_time
            plt.waitforbuttonpress(max(self.dt-exec_time, 1e-9))
        self.exec_time = time()

    def polling_loop(self, polling_pipe):
        current_t = -1
        while self.running.is_set():
            exec_time = time()
            state, t = self.plant.get_state(noisy=False)
            if t != current_t:
                polling_pipe.send((state, t))

            # sleep to guarantee the desired frame rate
            exec_time = time() - exec_time
            sleep(max(self.dt-exec_time, 0))

    def start(self):
        print_with_stamp('Starting drawing loop', self.name)
        self.drawing_thread = Process(target=self.drawing_loop,
                                      args=(self.drawing_pipe, ))
        self.drawing_thread.daemon = True
        self.polling_thread = Thread(target=self.polling_loop,
                                     args=(self.polling_pipe, ))
        self.polling_thread.daemon = True
        # self.drawing_thread = Process(target=self.run)
        self.running.set()
        self.polling_thread.start()
        self.drawing_thread.start()

    def stop(self):
        self.running.clear()

        if self.drawing_thread is not None and self.drawing_thread.is_alive():
            # wait until thread stops
            self.drawing_thread.join(10)

        if self.polling_thread is not None and self.polling_thread.is_alive():
            # wait until thread stops
            self.polling_thread.join(10)

        print_with_stamp('Stopped drawing loop', self.name)
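
# PlantDraw is abstract: subclasses must provide init_artists() and
# _update(). A minimal sketch of such a subclass that draws the plant state
# as a single point (illustrative; assumes the PlantDraw base class above and
# that the first two state dimensions are x/y coordinates):
class PointPlantDraw(PlantDraw):
    def init_artists(self):
        # one artist, redrawn in place on every frame
        self.point, = self.ax.plot([0], [0], 'bo', animated=True)

    def _update(self, state, t):
        self.point.set_data([state[0]], [state[1]])
        return [self.point]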
Exemplo n.º 45
0
class YasmMPReceiver(object):
    def __init__(self, yasm_cfg, yasmapi_timeout):
        """

        :type yasm_cfg: YasmCfg
        """
        self.panels = yasm_cfg.panels
        self.data_queue = Queue()
        self.timeout = yasmapi_timeout
        self._data_buffer = []
        self._start_event = Event()
        self._stop_event = Event()
        self.ps_pool = {
            panel.alias: Process(target=self.single_receiver, args=(panel, ))
            for panel in self.panels
        }
        self.consumers = {
            panel.alias: Thread(target=self.single_controller,
                                args=(panel, self.ps_pool[panel.alias]))
            for panel in self.panels
        }

    def get_buffered_data(self):
        data, self._data_buffer = self._data_buffer, []
        return data

    def start_collecting(self):
        [p.start() for p in self.ps_pool.values()]

    def start_transmitting(self):
        self._start_event.set()
        [consumer.start() for consumer in self.consumers.values()]

    def single_receiver(self, panel):
        # ignore SIGINT (process is controlled by .stop_event)
        """

        :type panel: Panel
        """
        signal.signal(signal.SIGINT, signal.SIG_IGN)

        stream = signals_stream(panel)
        try:
            while not panel.stop_trigger.is_set():
                ts, data = stream.next()
                if self._start_event.is_set():
                    panel.queue.put((ts, {panel.alias: data}))
        finally:
            logger.info('Closing panel {} receiver thread'.format(panel.alias))

    def single_controller(self, panel, ps):
        """

        :type ps: Process
        :type panel: Panel
        """
        while not panel.stop_trigger.is_set():
            try:
                ts, data = panel.queue.get(timeout=self.timeout)
                panel.last_ts = ts
                # logger.info('Received monitoring data for {}'.format(ts))
                self._data_buffer.append(monitoring_data(ts, data))
            except Empty:
                logger.warning(
                    'Not receiving any data from YASM. Probably your hosts/tags specification is not correct'
                )
                panel.stop_trigger.set()
                if ps.is_alive():
                    ps.terminate()
                break
            except KeyboardInterrupt:
                logging.warning(
                    'Interrupting collecting metrics for panel {}'.format(
                        panel.alias))
                panel.stop_trigger.set()
                if ps.is_alive():
                    ps.terminate()
                break

    def stop_now(self):
        end_time = time.time()
        active_panels = self.panels
        while len(active_panels) > 0:
            try:
                for panel in active_panels:
                    if panel.last_ts < end_time and not self._stop_event.is_set():
                        logger.info(
                            'Waiting for yasm metrics for panel {}'.format(
                                panel.alias))
                    else:
                        panel.stop_trigger.set()
                        self.ps_pool[panel.alias].join()
                        self.consumers[panel.alias].join()
                active_panels = [
                    panel for panel in active_panels
                    if not panel.stop_trigger.is_set()
                ]
                if len(active_panels) > 0:
                    time.sleep(5)
            except KeyboardInterrupt:
                logger.info('Metrics receiving interrupted')
                [panel.stop_trigger.set() for panel in active_panels]
                [(self.ps_pool[panel.alias].join(),
                  self.consumers[panel.alias].join())
                 for panel in active_panels]
Exemplo n.º 46
0
class ParallelMappedDataset(ProxiedDataset):
    """
    Transform samples into mapped samples, similar to 'basic.MappedDataset',
    but using multiple workers (threads or processes)

    Notes:
        this class is not thread-safe
    """
    def __init__(self, source, mapper, worker_args):
        super(ParallelMappedDataset, self).__init__(source)
        worker_args = {k.lower(): v for k, v in worker_args.items()}

        args = {'bufsize': 100, 'worker_num': 8}
        args.update(worker_args)
        self._worker_args = args
        self._started = False
        self._source = source
        self._mapper = mapper
        self._exit = False
        self._setup()

    def _setup(self):
        """setup input/output queues and workers """
        use_process = False
        if 'use_process' in self._worker_args:
            use_process = self._worker_args['use_process']
        if use_process and sys.platform == "win32":
            logger.info("Use multi-thread reader instead of "
                        "multi-process reader on Windows.")
            use_process = False

        bufsize = self._worker_args['bufsize']
        if use_process:
            from .shared_queue import SharedQueue as Queue
            from multiprocessing import Process as Worker
            from multiprocessing import Event
        else:
            if six.PY3:
                from queue import Queue
            else:
                from Queue import Queue
            from threading import Thread as Worker
            from threading import Event

        self._inq = Queue(bufsize)
        self._outq = Queue(bufsize)
        consumer_num = self._worker_args['worker_num']

        id = str(uuid.uuid4())[-3:]
        self._producer = threading.Thread(target=self._produce,
                                          args=('producer-' + id, self._source,
                                                self._inq))
        self._producer.daemon = True

        self._consumers = []
        for i in range(consumer_num):
            p = Worker(target=self._consume,
                       args=('consumer-' + id + '_' + str(i), self._inq,
                             self._outq, self._mapper))
            self._consumers.append(p)
            p.daemon = True

        self._epoch = -1
        self._feeding_ev = Event()
        self._produced = 0  # produced sample in self._produce
        self._consumed = 0  # consumed sample in self.next
        self._stopped_consumers = 0

    def _produce(self, id, source, inq):
        """Fetch data from source and feed it to 'inq' queue"""
        while True:
            self._feeding_ev.wait()
            if self._exit:
                break
            try:
                inq.put(source.next())
                self._produced += 1
            except StopIteration:
                self._feeding_ev.clear()
                self._feeding_ev.wait()  # wait for another thread to wake us up
                logger.debug("producer[{}] starts new epoch".format(id))
            except Exception as e:
                msg = "producer[{}] failed with error: {}".format(id, str(e))
                inq.put(EndSignal(-1, msg))
                break

        logger.debug("producer[{}] exits".format(id))

    def _consume(self, id, inq, outq, mapper):
        """Fetch data from 'inq', process it and put result to 'outq'"""
        while True:
            sample = inq.get()
            if isinstance(sample, EndSignal):
                sample.errmsg += "[consumer[{}] exits]".format(id)
                outq.put(sample)
                logger.debug("end signal received, " +
                             "consumer[{}] exits".format(id))
                break

            try:
                result = mapper(sample)
                outq.put(result)
            except Exception as e:
                msg = 'failed to map consumer[{}], error: {}'.format(
                    id, str(e))
                outq.put(EndSignal(-1, msg))
                break

    def drained(self):
        assert self._epoch >= 0, "first epoch has not started yet"
        return self._source.drained() and self._produced == self._consumed

    def stop(self):
        """ notify to exit
        """
        self._exit = True
        self._feeding_ev.set()
        for _ in range(len(self._consumers)):
            self._inq.put(EndSignal(0, "notify consumers to exit"))

    def next(self):
        """ get next transformed sample
        """
        if self._epoch < 0:
            self.reset()

        if self.drained():
            raise StopIteration()

        while True:
            sample = self._outq.get()
            if isinstance(sample, EndSignal):
                self._stopped_consumers += 1
                if sample.errno != 0:
                    logger.warn("consumer failed with error: {}".format(
                        sample.errmsg))

                if self._stopped_consumers < len(self._consumers):
                    self._inq.put(sample)
                else:
                    raise ValueError("all consumers exited, no more samples")
            else:
                self._consumed += 1
                return sample

    def reset(self):
        """ reset for a new epoch of samples
        """
        if self._epoch < 0:
            self._epoch = 0
            for p in self._consumers:
                p.start()
            self._producer.start()
        else:
            if not self.drained():
                logger.warn("do not reset before epoch[%d] finishes".format(
                    self._epoch))
                self._produced = self._produced - self._consumed
            else:
                self._produced = 0

            self._epoch += 1

        assert self._stopped_consumers == 0, "some consumers already exited," \
            + " cannot start another epoch"

        self._source.reset()
        self._consumed = 0
        self._feeding_ev.set()
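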
Exemplo n.º 47
0
class Bluetooth():
    def __init__(self, address, name="HC-05", passkey="1234", port=1):
        """
        address: hexadecimal address in string
        name: BT device display name - string
        passkey: string
        port: integer
        """
        self.address = address
        self.name = name
        self.passkey = passkey
        self.port = port
        self.socket = None
        self.socket_in_use = Event()
        self.socket_in_use.set()

        self.string_buffer = ''

    def connect(self):
        logging.info("Kill other bt-agent processes.")
        subprocess.call("kill -9 `pidof bt-agent`", shell=True)
        logging.info("Set passkey for BT agent.")
        status = subprocess.call("bt-agent " + self.passkey + " &", shell=True)
        try:
            self.socket = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
            self.socket.connect((self.address, self.port))
        except bluetooth.btcommon.BluetoothError as err:
            logging.error('BT Connection error!')
            raise err

    def send(self, msg, echo=False, max_msg_len=128):
        """
        Args:
            - msg: string message
            - echo: wait for echo? boolean
            - max_msg_len: integer - max message length to send
            (and receive on echo, if present)
        Returns:
            - None if echo is False - otherwise it blocks
            waiting for the echo message and returns it.
        Raises:
            - AssertionError if msg is longer than limit.
        """
        assert len(msg) <= max_msg_len, "Message is too long"
        self.socket_in_use.wait()
        self.socket_in_use.clear()
        self.socket.send(msg)
        self.socket_in_use.set()
        if echo:
            echo_msg = self.socket.recv(max_msg_len)
            return echo_msg
        else:
            return None

    def receive(self, size=1024, process_msg=False):
        msg = None
        while msg is None:
            self.socket_in_use.wait()
            self.socket_in_use.clear()
            msg = self.socket.recv(size)

            msg = str(msg, 'utf-8')
            if process_msg:
                msg = self.process_raw_message(msg)
            self.socket_in_use.set()

        return msg

    def close(self):
        self.socket.close()

    def process_raw_message(self, msg):
        self.string_buffer += msg
        rec_start = self.string_buffer.find('[')
        if rec_start != -1:
            self.string_buffer = self.string_buffer[(rec_start + 1):]
        rec_end = self.string_buffer.find(']')
        if rec_end != -1:
            frame = self.string_buffer[:rec_end]
            self.string_buffer = self.string_buffer[rec_end + 1:]
            return frame

        return None
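
# process_raw_message() implements a simple framing protocol: records are
# wrapped in square brackets and may arrive split across several recv()
# calls. A small self-contained check of that behaviour (assumes the
# Bluetooth class above; the address is a placeholder and no socket is
# opened):
if __name__ == '__main__':
    bt = Bluetooth(address='00:00:00:00:00:00')
    assert bt.process_raw_message('[12.5;') is None      # frame incomplete
    assert bt.process_raw_message('3.7]') == '12.5;3.7'  # frame completed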
Exemplo n.º 48
0
class ServerProc(object):
    def __init__(self, scheme=None):
        self.proc = None
        self.daemon = None
        self.stop = Event()
        self.scheme = scheme

    def start(self, init_func, host, port, paths, routes, bind_address, config, **kwargs):
        self.proc = Process(target=self.create_daemon,
                            args=(init_func, host, port, paths, routes, bind_address,
                                  config),
                            name='%s on port %s' % (self.scheme, port),
                            kwargs=kwargs)
        self.proc.daemon = True
        self.proc.start()

    def create_daemon(self, init_func, host, port, paths, routes, bind_address,
                      config, **kwargs):
        if sys.platform == "darwin":
            # on Darwin, NOFILE starts with a very low limit (256), so bump it up a little
            # by way of comparison, Debian starts with a limit of 1024, Windows 512
            import resource  # local, as it only exists on Unix-like systems
            maxfilesperproc = int(subprocess.check_output(
                ["sysctl", "-n", "kern.maxfilesperproc"]
            ).strip())
            soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
            # 2048 is somewhat arbitrary, but gives us some headroom for wptrunner --parallel
            # note that it's expected that 2048 will be the min here
            new_soft = min(2048, maxfilesperproc, hard)
            if soft < new_soft:
                resource.setrlimit(resource.RLIMIT_NOFILE, (new_soft, hard))
        try:
            self.daemon = init_func(host, port, paths, routes, bind_address, config, **kwargs)
        except socket.error:
            logger.critical("Socket error on port %s" % port, file=sys.stderr)
            raise
        except Exception:
            logger.critical(traceback.format_exc())
            raise

        if self.daemon:
            try:
                self.daemon.start(block=False)
                try:
                    self.stop.wait()
                except KeyboardInterrupt:
                    pass
            except Exception:
                print(traceback.format_exc(), file=sys.stderr)
                raise

    def wait(self):
        self.stop.set()
        self.proc.join()

    def kill(self):
        self.stop.set()
        self.proc.terminate()
        self.proc.join()

    def is_alive(self):
        return self.proc.is_alive()
Exemplo n.º 49
0
class Pipeline:
    def __init__(self, scraper, databaser, tor_ip_changer):
        """Webscraping pipeline.

        How it works:
        1. generate URLs for item list pages
        2. scrape item URLs from list pages
        3. store scraped item URLs in the DB
        4. load item URLs from the DB
        5. collect item properties
        6. store collected data in the DB

        :argument scraper: a scraper instance
        :type scraper: object
        :argument databaser: a databaser instance
        :type databaser: object
        :argument tor_ip_changer: a TorIpChanger instance
        :type tor_ip_changer: object
        """
        self.scraper = scraper
        self.databaser = databaser
        self.tor_ip_changer = tor_ip_changer

        self.workers_count = Config.WORKERS_COUNT

        self.workers = []

    def prepare_pipeline(self):
        """Prepare all necessary multithreading and multiprocessing objects."""
        self.url_queue = Queue()
        self.response_queue = Queue()
        self.data_queue = Queue()

        self.pool = ThreadPoolExecutor(self.workers_count)

        self.producing_urls_in_progress = Event()
        self.requesting_in_progress = Event()
        self.scraping_in_progress = Event()

        self.urls_to_process = Value("i", 0)
        self.urls_processed = Value("i", 0)
        self.urls_bucket_empty = Value("i", 1)

    def inform(self, message, log=True, end="\n"):
        """Print and if set log a message.

        :argument message:
        :type message: str
        :argument log: flag to also log the message
        :type log: bool
        :argument end: message line end
        :type end: str
        """
        if log:
            logging.info(message)

        print("{0} {1}".format(get_current_datetime(), message), end=end)

    def _inform_progress(self):
        """Print a message about how the scraping is progressing."""
        try:
            message = "Processed {0} ({1:.2f}%) URLs".format(
                self.urls_processed.value,
                self.urls_processed.value / self.urls_to_process.value * 100,
            )
            self.inform(message, log=False, end="\r")
        except ZeroDivisionError:
            pass

    def change_ip(self):
        """Change IP address.

        By default, IP is changed after each bunch of URLs is requested.
        """
        try:
            new_ip = self.tor_ip_changer.get_new_ip()
            logging.info("New IP: {new_ip}".format(new_ip=new_ip))
        except:  # noqa
            logging.error("Failed setting new IP")
            return self.change_ip()

    def generate_list_urls(self):
        """Create a generator for populating `url_queue` with list URLs."""
        put_urls = 0
        for list_url in self.scraper.generate_list_urls():
            self.url_queue.put(list_url)

            put_urls += 1
            if put_urls == self.workers_count:
                put_urls = 0
                yield

    def generate_item_urls(self):
        """Create a generator for populating `url_queue` with item URLs."""
        query = self.databaser.get_item_urls()

        put_urls = 0
        for item_url in query.yield_per(self.workers_count):
            self.url_queue.put(item_url[0])

            put_urls += 1
            if put_urls == self.workers_count:
                put_urls = 0
                yield

    def _classify_response(self, response):
        """Examine response and put it to 'response_queue' if it's OK or put
        it's URL  back to 'url_queue'.

        :argument response:
        :type response: request.response
        """
        if not response.ok and response.status_code >= 408:
            self.url_queue.put(response.url)
        else:
            self.response_queue.put(response)

    def _actually_get_html(self, urls):
        """Request provided URLs running multiple processes.

        :argument urls: URLs to get data from
        :type urls: list
        """
        try:
            self.requesting_in_progress.set()
            for response in self.pool.map(get, urls):
                self._classify_response(response)
        except Exception as exc:
            logging.error("Failed scraping URLs")
            logging.exception(exc)
        finally:
            self.requesting_in_progress.clear()

    def get_html(self, urls_generator):
        """Get HTML for URLs from 'url_queue'."""
        run = True
        self.inform("URLs to process: {}".format(self.urls_to_process.value))
        self.producing_urls_in_progress.set()

        while run:
            try:
                next(urls_generator)
            except StopIteration:
                self.producing_urls_in_progress.clear()

            urls_bucket = []
            self.urls_bucket_empty.value = 1
            for _ in range(0, self.workers_count):
                url = self.url_queue.get()

                if url == EXIT:
                    run = False
                    break
                elif url == DUMP_URLS_BUCKET:
                    break

                urls_bucket.append(url)
                if self.urls_bucket_empty.value:
                    self.urls_bucket_empty.value = 0

            if urls_bucket:
                self._actually_get_html(urls_bucket)

            if run:
                self.change_ip()

    def _scrape_data(self, response):
        """Scrape HTML provided by the given response.

        :argument response:
        :type response: request.response

        :returns dict
        """
        if self.scraper.list_url_template in response.url:
            data = self.scraper.get_item_urls(response)
        else:
            data = self.scraper.get_item_properties(response)

        return data

    def _actually_collect_data(self, response):
        """Collect data from the given response.

        :argument response:
        :type response: request.response
        """
        try:
            self.scraping_in_progress.set()

            data = self._scrape_data(response)
            if data:
                self.data_queue.put(data)
        except Exception as exc:
            logging.error(
                'Failed processing response for "{}"'.format(response.url)
            )
            logging.exception(exc)
        finally:
            self.scraping_in_progress.clear()

    def collect_data(self):
        """Get data for responses from 'response_queue'."""
        while True:
            response = self.response_queue.get()

            if response == EXIT:
                break

            self._actually_collect_data(response)

    def _store_item_urls(self, data):
        """Handle storing item URLs.

        :argument data: item URLs
        :type data: list
        """
        if not data:
            return

        self.databaser.insert_multiple(data, self.databaser.item_urls_table)

    def _store_item_properties(self, data):
        """Handle storing item properties.

        :argument data: item properties
        :type data: dict
        """
        if len(data) > 1:
            # NOTE: if there is only a single item in the data dict (the URL),
            # there is no point in storing it.
            self.databaser.insert(data, self.databaser.item_data_table)

        # Remove processed item URL.
        self.databaser.delete_url(data["url"])

    def _actually_store_data(self, data):
        """Store provided data in the DB.

        :argument data: data to store in the DB
        :type data: str or list or dict
        """
        try:
            if isinstance(data, list):
                self._store_item_urls(data)
            else:
                self._store_item_properties(data)
        except Exception as exc:
            logging.error("Failed storing data")
            logging.exception(exc)
        finally:
            self.urls_processed.value += 1

    def store_data(self):
        """Consume 'data_queue' and store provided data in the DB."""
        self.urls_processed.value = 0

        while True:
            data = self.data_queue.get()

            if data == EXIT:
                break

            self._actually_store_data(data)

        self.databaser.commit()

    def exit_workers(self):
        """Exit workers started as separate processes by passing an EXIT
        message to all queues. This causes the `while` loops that the worker
        processes run in to exit.
        """
        self.inform("Exiting workers, please wait ...")

        self.url_queue.put(EXIT)
        self.response_queue.put(EXIT)
        self.data_queue.put(EXIT)

    def _queues_empty(self):
        """Check if queues are empty.

        :returns bool
        """
        return (
            self.url_queue.empty()
            and self.response_queue.empty()
            and self.data_queue.empty()
        )

    def _workers_idle(self):
        """Check if workers are idle.

        :returns bool
        """
        return (
            not self.producing_urls_in_progress.is_set()
            and not self.requesting_in_progress.is_set()
            and not self.scraping_in_progress.is_set()
        )

    def switch_power(self):
        """Check when to exit workers so the program won't run forever."""
        while True:
            # Check if workers can end.
            if (
                self._queues_empty()
                and self._workers_idle()
                and self.urls_bucket_empty.value
            ):
                self.exit_workers()
                break

            # Ensure all URLs are processed.
            if (
                self._queues_empty()
                and self._workers_idle()
                and not self.urls_bucket_empty.value
            ):
                logging.info("Dumping URLs bucket")
                self.url_queue.put(DUMP_URLS_BUCKET)

            # Inform about the progress.
            self._inform_progress()
            time.sleep(5)

    def employ_worker(self, target):
        """Create and register a daemon worker process.

        :argument target: worker's task
        :type target: function
        """
        worker = Process(target=target)
        worker.daemon = True
        worker.start()

        self.workers.append(worker)

    def release_workers(self):
        """Wait till all worker daemons are finished."""
        for worker in self.workers:
            worker.join()

    def run(self, target, urls_count, generate_url_function):
        self.inform("Collecting item {0}".format(target))
        self.urls_to_process.value = urls_count

        # response_queue --> data_queue.
        self.employ_worker(self.collect_data)

        # data_queue --> DB.
        self.employ_worker(self.store_data)

        # Prevent running forever.
        self.employ_worker(self.switch_power)

        # NOTE Execution will block until 'get_html' is finished.
        # url_queue --> response_queue.
        urls_generator = generate_url_function()
        self.get_html(urls_generator)

        self.release_workers()

    def get_item_urls(self):
        """Get item URLs from item list pages."""
        urls_count = self.scraper.list_urls_count
        self.run("URLs", urls_count, self.generate_list_urls)

    def get_item_properties(self):
        """Get item properties from item pages."""
        urls_count = self.databaser.get_item_urls().count()
        self.run("properties", urls_count, self.generate_item_urls)
Exemplo n.º 50
0
class BatchManager:
    """
    Creates an input queue for Tensorflow, managing example creation and
    examples aggregation on multiple processes.
    """
    def __init__(self,
                 MAX_CAPACITY: int=10,
                 batch_size: int=3,
                 generator_fun=[lambda: 1],
                 postprocess_fun=None,
                 timeout: int=360):
        """
        Creates the DataGenerator and DataAggregator processes and starts them.
        Use with a with statement, as it will close processes automatically.

        @params:
        MAX_CAPACITY (int): Maximum number of batches or examples in
                            DataGenerator and DataAggregator queues
        batch_size (int): The number of examples in a batch
        generator_fun (list): List of callables that generates an example. One
                              DataGenerator process per element in the list will
                              be created.
        timeout (int):      Maximum time to retrieve a batch. Defaults to 360s;
                            change if generating a batch takes longer.

        @returns:
        """
        self.timeout = timeout
        self.generator_fun = generator_fun
        self.MAX_CAPACITY = MAX_CAPACITY
        self.batch_size = batch_size
        self.postprocess_fun = postprocess_fun
        self.stop_event = None
        self.data_q = None
        self.batch_q = None
        self.n_in_queue = None
        self.data_aggregator = None
        self.data_generators = None
    
        self.init()
    
    def init(self):
        self.stop_event = Event()
        self.data_q = Queue(self.MAX_CAPACITY)
        self.batch_q = Queue(self.MAX_CAPACITY)
        self.n_in_queue = Counter()
        self.data_aggregator = DataAggregator(self.data_q,
                                              self.batch_q,
                                              self.stop_event,
                                              self.batch_size,
                                              n_in_queue=self.n_in_queue,
                                              postprocess_fun=self.postprocess_fun)

        self.data_generators = [DataGenerator(self.data_q,
                                         self.stop_event,
                                         generator_fun=self.generator_fun[ii])
                                for ii in range(len(self.generator_fun))]
                                
        for w in self.data_generators:
            w.start()
        self.data_aggregator.start()

    def next_batch(self):
        """
        Output the next batch of examples from the queue.

        @returns:
        """
        batch = None
        while batch is None:
            try:
                self.n_in_queue.increment(-1)
                batch = self.batch_q.get(timeout=self.timeout)
            except queue.Empty:
                print("Restarting data_generators")
                self.close()
                self.init()
        
        return batch

    def put_batch(self, batch):
        """
        Puts back a batch of examples in the queue

        @returns:
        """
        if not self.batch_q.full():
            self.batch_q.put(batch)
            self.n_in_queue.increment(1)

    def close(self, timeout: int = 5):
        """
        Terminate running processes

        @returns:
        """
        self.stop_event.set()

        for w in self.data_generators:
            w.join(timeout=timeout)
            while w.is_alive():
                w.terminate()
        self.data_aggregator.join(timeout=timeout)
        while self.data_aggregator.is_alive():
            self.data_aggregator.terminate()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
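
Assuming BatchManager and its DataGenerator/DataAggregator helpers live in an importable module, a typical call site might look like the sketch below; the generator callables, capacities, and the module path are assumptions, not part of the original example.

# Hypothetical usage of the BatchManager above.
# from batching import BatchManager  # assumed import location
import random


def make_example():
    # Stand-in example generator: a single random float per example.
    return random.random()


if __name__ == "__main__":
    with BatchManager(MAX_CAPACITY=10,
                      batch_size=4,
                      generator_fun=[make_example, make_example]) as manager:
        batch = manager.next_batch()   # blocks until a batch is aggregated
        print(batch)
        manager.put_batch(batch)       # optionally push the batch back
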
Exemplo n.º 51
0
 def call(self, method, args):
     server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
     for port in range(14000, 62000):
         try:
             server_socket.bind(("127.0.0.1", port))
             break
         except socket.error:
             continue
     server_socket.listen(5)
     ev = Event()
     input_thread = Process(target=input_reader, args=(port, ev))
     input_thread.start()
     (input_socket, _) = server_socket.accept()
     input_socket.setblocking(0)
     rpc_cmd = self.rpc(method, args)
     self._sendall(rpc_cmd)
     recv_buf = ""
     try:
         while True:
             rl, _, xl = select.select([self.ssh_channel, input_socket], [],
                                       [])
             if self.ssh_channel in rl:
                 if self.ssh_channel.recv_ready():
                     new_data = self.ssh_channel.recv(4096)
                     recv_buf += new_data
                     if "\n" in recv_buf:
                         lines = recv_buf.split("\n")
                         # Last line is either not complete or empty string.
                         # ("x\nnot compl".split("\n") => ['x', 'not compl'] or "x\n".split("\n") => ['x', ''])
                         # so we put it back in recv_buf for next iteration
                         recv_buf = lines.pop()
                         for line in lines:
                             resp = json.loads(line)
                             if "stdout" in resp:
                                 sys.stdout.write(resp["stdout"])
                                 sys.stdout.flush()
                             elif "stderr" in resp:
                                 log.white(resp["stderr"], f=sys.stderr)
                             elif "result" in resp:
                                 if resp['error'] is not None:
                                     raise SshRpcCallError(
                                         resp['error']['message'])
                                 #print("ending",method)
                                 return resp["result"]
                 if self.ssh_channel.recv_stderr_ready():
                     log.white("{}".format(
                         self.ssh_channel.recv_stderr(4096)))
                 if self.ssh_channel.exit_status_ready():
                     raise SshRpcError()
             if input_socket in rl:
                 new_stdin_data = input_socket.recv(1024)
                 self._sendall(self.stdin(new_stdin_data))
     except (KeyboardInterrupt, SshRpcError):
         self.ssh_channel.shutdown(2)
         self.ssh_channel = None
         raise KeyboardInterrupt()
     finally:
         ev.set()
         input_thread.terminate()
         input_thread.join()
         del input_socket
         del server_socket
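
The loop above relies on a small but easy-to-get-wrong trick: split the receive buffer on newlines, parse only the complete lines, and keep the trailing partial line for the next read. The standalone sketch below reproduces just that part; the feed helper and the sample messages are illustrative, not taken from the original module.

# Sketch of the line-buffering trick used above: keep the trailing partial
# line in the buffer so only complete JSON lines are parsed.
import json


def feed(recv_buf, new_data, handle):
    """Append new_data to recv_buf, parse every complete line, return the rest."""
    recv_buf += new_data
    if "\n" in recv_buf:
        lines = recv_buf.split("\n")
        recv_buf = lines.pop()      # last element is partial (or empty)
        for line in lines:
            if line:                # skip blank lines defensively
                handle(json.loads(line))
    return recv_buf


buf = ""
buf = feed(buf, '{"stdout": "hel', print)    # nothing parsed yet
buf = feed(buf, 'lo"}\n{"result":', print)   # first message parsed
buf = feed(buf, ' 42, "error": null}\n', print)
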
Exemplo n.º 52
0
class ProcessBlock(Process, ABC):
    """
    The abstract class for a block/process in an execution pipeline
    """
    # Arbitrary timeout for blocking queue operations
    _poll_interval = 1

    def __init__(self, *args, parent=None, queue_size=0, **kwargs):
        super().__init__(*args, **kwargs)

        # Events (in the order they should be checked)
        self.events = OrderedDict([
            ("cancel", Event()),
            ("requeue", Event()),
            ("stop", Event()),
            ])

        # Corresponding event handlers
        self.event_handlers = {
            "cancel": self._cancel_handler,
            "requeue": self._requeue_handler,
            "stop": self._stop_handler,
            }

        # Master event, to be set after any other event
        self.event = Event()

        # The family of the processblock
        siblings = copy(parent.family.children) if parent is not None else []
        self.family = BlockFamily(parent, siblings, [])
        # Link family with self
        self.family.link(self)

        # The object queue
        self.objs = JoinableQueue(queue_size)
        # List of objects that were canceled and need re-processing
        self._canceled_objs = deque()

        # Logging facility
        self.logger = getLogger(self.name)

        # Object currently processed
        self._obj = None

    def start(self):
        super().__init__(name=self.name)
        super().start()

    @abstractmethod
    def process_obj(self, obj):
        """
        The actual work a block wants to perform on a object
        """
        raise NotImplementedError()

    def _stop_handler(self):
        """
        Send the "end object" (None) to every child
        """
        self.logger.debug("sending the 'end object' to child processes...")
        for _ in self.family.alive_children():
            self.objs.put(None)

    def cancel(self):
        """
        Set the cancel event and the master event
        """
        self.events["cancel"].set()
        self.event.set()

    def _cancel_handler(self):
        """
        Cancel children's objects and re-queue them in self._canceled_objs
        """
        self.logger.debug("ask children to requeue their objects")
        for child in self.family.alive_children():
            child.events["requeue"].set()
            child.event.set()

        self.logger.debug("fetching canceled objects...")
        while (self.objs.qsize() != 0 or
               any(child.events["requeue"].is_set()
                   for child in self.family.alive_children())):
            try:
                obj = self.objs.get_nowait()
                self.objs.task_done()
            except Empty:
                continue
            if obj is not None:
                self._canceled_objs.append(obj)

        # To be able to stop without the parent block sending an 'end object'
        if self.events["stop"].is_set():
            self._canceled_objs.append(None)
            self.events["stop"].clear()

        # Clear the event
        self.events["cancel"].clear()

    def _requeue_handler(self):
        """
        Requeue every object managed by the block or one of its children
        """
        for child in self.family.alive_children():
            child.events["requeue"].set()
            child.event.set()

        self.logger.debug("requeueing objects...")
        if self._obj is not None:
            self.family.parent.objs.put(self._obj)
            self._obj = None

        while (self.objs.qsize() != 0 or
               any(child.events["requeue"].is_set()
                   for child in self.family.alive_children())):
            try:
                obj = self.objs.get_nowait()
                self.objs.task_done()
            except Empty:
                # Do not waste that time
                if self._canceled_objs:
                    obj = self._canceled_objs.popleft()
                else:
                    continue
            if obj is not None:
                self.family.parent.objs.put(obj)

        for obj in filter(lambda x: x is not None, self._canceled_objs):
            self.family.parent.objs.put(obj)

        self.logger.debug("wait for parent to fetch all the objects...")
        self.family.parent.objs.join()

        # Processblock was potentially stopped
        self.events["stop"].clear()

        # Clear the event
        self.events["requeue"].clear()

    def _process_events(self, ignore=()):
        """
        Process events

        The order in which events are processed is important
        Returns:
            True --- if an Event was processed
            False --- otherwise
        """
        self.logger.debug("process events...")
        if not self.event.is_set():
            return False
        self.event.clear()

        event_processed = False
        for event_name in self.events:
            if event_name in ignore:
                continue
            if self.events[event_name].is_set():
                self.logger.debug("processing '%s' event", event_name)
                self.event_handlers[event_name]()
                event_processed = True

        return event_processed

    def get_obj(self, timeout=None):
        """
        Get an object from the parent block
        """
        self.logger.debug("get an object to process...")
        try:
            return self._canceled_objs.popleft()
        except IndexError:
            obj = self.family.parent.objs.get(timeout=timeout)
            self.family.parent.objs.task_done()
            return obj

    def try_publish_obj(self, obj, poll_interval=None):
        """
        Publish `obj` to child blocks (unless `obj` is None)

        Returns: True if `obj` was published
                 False if an event occurred before `obj` was published
        """
        if obj is None:
            return True

        if not self.family.children:
            self.logger.debug("no one to pass '%s' onto", obj)
            return True

        self.logger.debug("publish '%s'", obj)
        while not self.event.is_set():
            try:
                self.objs.put(obj, timeout=poll_interval)
            except Full:
                continue
            return True

        # An event occurred
        self.logger.debug("publication was interrupted by an event")
        return False

    def _cleanup(self):
        """
        Tell parent and siblings we stop and exit cleanly
        """
        if self.family.parent is not None:
            self.family.parent.event.set()
        for sibling in self.family.siblings:
            sibling.event.set()
        self.logger.debug("waiting for child processes...")
        for child in self.family.children:
            child.join()

    def run(self):
        """
        Launch child blocks and process objects
        """
        # Launch child blocks
        # Children are started here in order to build a graceful process tree
        self.logger.debug("start %d child(ren)", len(self.family.children))
        for child in self.family.children:
            child.start()

        while not self.events["stop"].is_set():
            # Processing loop
            while not self.events["stop"].is_set():
                # Process exterior events
                if self._process_events():
                    continue

                # Find an object to process
                if self._obj is None:
                    try:
                        self._obj = self.get_obj(timeout=self._poll_interval)
                    except Empty:
                        continue

                    if self._obj is None:
                        self.logger.debug("received the 'end object'")
                        self.events["stop"].set()
                        self.event.set()
                        continue

                obj = self._obj

                # Process the object
                self.logger.debug("process '%s'", obj)
                try:
                    obj = self.process_obj(obj)
                except ProcessingError as exc:
                    self.logger.warning(exc)
                    continue
                except EventInterrupt:
                    # An event occurred, process it
                    continue

                # Publish the processed object, check for events periodically
                if self.try_publish_obj(obj,
                                        poll_interval=self._poll_interval):
                    # Object was published, or did not need to be
                    self._obj = None

            # Process the stop event (which is ignored in the loop underneath)
            self._process_events()

            # Wait for the entire family to stop, unless `stop` gets cleared
            while (self.events["stop"].is_set() and
                   not self.family.is_stopped()):
                self.event.wait()
                self._process_events(ignore=("stop",))

        # Process is exiting, there is no turning back
        # Every sibling/child process will shortly do so too (or already have)
        self._cleanup()
        self.logger.debug("terminating")
Exemplo n.º 53
0
class TracePathsBetweenNodes(object):
    def get_path_segments(self, data):
        ip_regex = re.compile(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}")
        soup = BeautifulSoup(data, "lxml")
        tag = soup.pre
        source = soup.title
        source_ip = [ip_regex.search(x).group() for x in source]
        traceroute_output = tag.string.split("\\n")
        sanitized_output = [
            ip_regex.search(x).group() for x in traceroute_output
            if "ms" in x and ip_regex.search(x)
        ]
        nodes = source_ip + sanitized_output
        edges = [(x, y)
                 for x, y in zip(sanitized_output, sanitized_output[1:])]
        return {"nodes": nodes, "edges": edges}

    def do_http_req(self, command):
        print("Running treaceroute command", command)
        try:
            traceroute_output = urlopen(command, timeout=60)
            output_string = traceroute_output.read()
            return output_string
        except Exception:
            print('socket timed out - URL %s' % command)
            self.exit = Event()
            self.exit.set()

    def run_traceroute(self, command):
        path_segments = []
        sites = []
        output_string = self.do_http_req(command)
        if output_string is not None:
            ret = self.get_path_segments(str(output_string))
            path_segments += ret["edges"]
            sites += ret["nodes"]
            return {"edges": path_segments, "nodes": sites}
        else:
            return None

    def create_traceroute_commands(self, site_dict):
        site_list = [x for x in site_dict.values()]
        host_names_list = [x.split('/')[2] for x in site_dict.values()]
        traces = []
        for item in site_list:
            traces += [
                item + "gui/reverse_traceroute.cgi?target=" + x +
                "&function=traceroute" for x in host_names_list
                if x not in item
            ]
        return traces

    def get_nodes_and_edges(self, site_dict):
        commands = self.create_traceroute_commands(site_dict)
        nodes = []
        edges = []
        with Pool(processes=25) as p:
            result = p.map(self.run_traceroute, commands)

        #beautify this
        for item in result:
            #print(result)
            if item is not None:
                nodes.extend(item["nodes"])
                edges.extend(item["edges"])
        n = list(set(nodes))
        e = list(set(edges))
        return {"nodes": n, "edges": e}
Exemplo n.º 54
0
class WalletRPCManager(ProcessManager):
    def __init__(self,
                 resources_path,
                 wallet_file_path,
                 wallet_password,
                 app,
                 log_level=1):
        self.user_agent = str(uuid4().hex)
        wallet_log_path = os.path.join(os.path.dirname(wallet_file_path),
                                       "bixbite-wallet-rpc.log")
        wallet_rpc_args = u'%s/bin/bixbite-wallet-rpc --wallet-file %s --log-file %s --rpc-bind-port 44046 --user-agent %s --log-level %d' \
                                            % (resources_path, wallet_file_path, wallet_log_path, self.user_agent, log_level)

        ProcessManager.__init__(self, wallet_rpc_args, "bixbite-wallet-rpc")
        sleep(0.2)
        self.send_command(wallet_password)

        self.rpc_request = WalletRPCRequest(app, self.user_agent)
        #         self.rpc_request.start()
        self._ready = False
        self.block_height = 0
        self.is_password_invalid = Event()
        self.last_log_lines = []

    def run(self):
        is_ready_str = "Starting wallet rpc server"
        err_str = "ERROR"
        invalid_password = "******"
        height_regex = re.compile(
            r"Processed block: \<([a-z0-9]+)\>, height (\d+)")
        height_regex2 = re.compile(r"Skipped block by height: (\d+)")
        height_regex3 = re.compile(
            r"Skipped block by timestamp, height: (\d+)")

        for line in iter(self.proc.stdout.readline, b''):
            m_height = height_regex.search(line)
            if m_height: self.block_height = m_height.group(2)
            if not m_height:
                m_height = height_regex2.search(line)
                if m_height: self.block_height = m_height.group(1)
            if not m_height:
                m_height = height_regex3.search(line)
                if m_height: self.block_height = m_height.group(1)

            if not self._ready and is_ready_str in line:
                self._ready = True
                log(line.rstrip(), LEVEL_INFO, self.proc_name)
            elif err_str in line:
                self.last_error = line.rstrip()
                log(line.rstrip(), LEVEL_ERROR, self.proc_name)
                if not self.is_password_invalid.is_set() and invalid_password in self.last_error:
                    self.is_password_invalid.set()
            elif m_height:
                log(line.rstrip(), LEVEL_INFO, self.proc_name)
            else:
                log(line.rstrip(), LEVEL_DEBUG, self.proc_name)

            if len(self.last_log_lines) > 1:
                self.last_log_lines.pop(0)
            self.last_log_lines.append(line.rstrip())

        if not self.proc.stdout.closed:
            self.proc.stdout.close()

    def is_ready(self):
        return self._ready

    def is_invalid_password(self):
        return self.is_password_invalid.is_set()

    def stop(self):
        self.rpc_request.stop_wallet()
        if self.is_proc_running():
            counter = 0
            while True:
                if self.is_proc_running():
                    if counter < 5:
                        sleep(1)
                        counter += 1
                    else:
                        self.proc.kill()
                        log("[%s] killed" % self.proc_name, LEVEL_INFO,
                            self.proc_name)
                        break
                else:
                    break
        self._ready = False
        log("[%s] stopped" % self.proc_name, LEVEL_INFO, self.proc_name)
Exemplo n.º 55
0
class MultistreamCache():
    '''
    Input sample cache that employs a set of worker processes which collect new input samples from files to insert into the cache.
    Can produce sample batches by randomly selecting items from cache.
    Ensures at least a certain number of items are refreshed after each new batch is generated.
    '''
    def __init__(self,
                 worker_method,
                 worker_options,  # dict providing num_workers, cache_size and min_replacement_rate (replace at least this many entries on each cache update; can be fractional)
                 alpha_smoother=0.99):   # the higher, the more temporally smoothed the average_replacement_rate is. Not very important

        self.num_workers = worker_options["num_workers"]
        self.worker_method = worker_method
        self.worker_options = worker_options
        self.cache_size = worker_options["cache_size"]
        self.min_replacement_rate = worker_options["min_replacement_rate"]
        self.alpha_smoother = alpha_smoother

        # Internal Data Structures
        self.communication_queue = Queue(maxsize=50)  #TODO  hardcoded for now
        self.worker_handles = []
        self.cache = [None] * self.cache_size
        self.idx_next_item_to_be_updated = 0
        self.average_replacement_rate = self.min_replacement_rate
        self.exit_flag = Event()
        self.exit_flag.clear()
        self.counter_cache_items_updated = 0

        # call seed if this is used from different threads / processes
        seed()

    def start_workers(self):
        for k in range(self.num_workers):
            p = Process(target=self.worker_method,
                        args=(self.communication_queue,
                              self.exit_flag,
                              self.worker_options))
            p.start()
            self.worker_handles.append(p)

        # Fill cache
        print('----- Filling cache (Size: {}) -------'.format(self.cache_size))
        for k in range(self.cache_size):
            self.update_next_cache_item(self.communication_queue.get())
        print('----- Cache Filled -------')

        # We reset the update counter when starting the workers
        self.counter_cache_items_updated = 0

    def stop_workers(self):
        # We just kill them assuming there is nothing to be shut down properly.
        # This is somewhat brutal but simplifies things a lot and is enough for now
        self.exit_flag.set()
        for worker in self.worker_handles:
            worker.join(timeout=3)
            worker.terminate()  # try harder to kill it off if necessary

    def update_next_cache_item(self, data):
        self.cache[self.idx_next_item_to_be_updated] = data
        self.idx_next_item_to_be_updated = (self.idx_next_item_to_be_updated + 1) % self.cache_size
        self.counter_cache_items_updated += 1

    def update_cache_from_queue(self):

        # Implements a minimum update rate in terms of an average
        # number of items that have to be replaced in a call to this
        # function. If the average is not achieved, this functions
        # blocks until the required number of items are replaced in the
        # cache.

        num_replacements_current = 0
        average_replacement_rate_prev = self.average_replacement_rate

        while True:
            average_replacement_rate_updated = (1-self.alpha_smoother) * num_replacements_current + self.alpha_smoother * average_replacement_rate_prev

            if (average_replacement_rate_updated >= self.min_replacement_rate):
                break
            if num_replacements_current == self.cache_size:  # entire cache replaced? Your IO is super fast!
                break

            #print('Loading new item into cache from data list starting with ' + self.worker_options["file_list"][0])
            self.update_next_cache_item(self.communication_queue.get())
            num_replacements_current += 1

        # Final update of self.average_replacement_rate
        self.average_replacement_rate = average_replacement_rate_updated

    def get_cache_item(self, idx):
        return self.cache[idx]

    def set_cache_item(self, idx, item):
        self.cache[idx] = item
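
start_workers and update_cache_from_queue assume a worker_method with the signature worker_method(communication_queue, exit_flag, worker_options). Below is a hypothetical worker and driver; the sample_size option and all numeric values are made up for illustration.

# Hypothetical worker for MultistreamCache: it must push items onto the
# communication queue until exit_flag is set.
# from cache_module import MultistreamCache  # assumed import location
import random


def random_sample_worker(communication_queue, exit_flag, worker_options):
    size = worker_options["sample_size"]        # assumed extra option
    while not exit_flag.is_set():
        communication_queue.put([random.random() for _ in range(size)])


if __name__ == "__main__":
    options = {"num_workers": 2, "cache_size": 8,
               "min_replacement_rate": 0.5, "sample_size": 4}
    cache = MultistreamCache(random_sample_worker, options)
    cache.start_workers()
    cache.update_cache_from_queue()     # refresh part of the cache
    print(cache.get_cache_item(0))
    cache.stop_workers()
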
Exemplo n.º 56
0
class NeuralNetService:
    """
    This class starts a NeuralNet as a server-like daemon in a separate process. It gives you the option to
    wait for the predictions of your NeuralNet when you need them.

    :param netconf: NeuralNetConfiguration, adapted to your weights
    :param ptweights: the absolute Path to your weights
    :param start: If set to True, will start the service on Initialisation. Default: False
    ##### use these methods #######
        predict: lets the loaded model detect the given images, waits until detection is ready
        add_prediction_task: adds an imgList to a task queue, which the NeuralNet will work on
        get_prediction_task: returns the prediction of a task, first imgList in, first prediction out (FIFO)
        start_service: starts the process hosting the NeuralNet
        shut_down_service: shuts down the service, does not check for unfinished tasks in the task queue
        ....
    """
    def __init__(self, netconf, ptweights, start=False):

        # important for logging in the console #
        log_to_stderr()
        logger = get_logger()
        logger.setLevel(logging.INFO)

        # initializing the service
        self.init_service(netconf, ptweights, start)

    def __del__(self):
        self.shut_down_service()

    def add_prediction_task(self, imgList):
        """
        Puts imgList as a new task in the task queue (FIFO).
        :param imgList: list of the images
        :return: None
        """
        self._input_queue.put(imgList)

    def get_prediction_task(self, block=True, timeout=None):
        """
        Gets the first prediction in the prediction queue (FIFO). A way to synchronize if the default parameters
        are used. Otherwise, if the queue is empty and the timeout expires, a "queue.Empty" exception is raised.
        :param block: If set to False, it does not wait.
        :param timeout: If block is set to True(default), it waits timeout seconds for something to return.
        :return: Predictions as a List
        """
        out = self._output_queue.get(block=block, timeout=timeout)
        return out

    def predict(self, imgList):
        """
        Same method as like in NeuralNet
        Puts the imgList in the queue and waits for it to be done.
        :param imgList: a list with the images to detect
        :return: Predictions of the imgList
        """
        self.add_prediction_task(imgList)
        return self.get_prediction_task()

    def _start_neuralnet_service(self, netConf, pathObjWeights, input_queue,
                                 output_queue, shut_down_event):
        """
        Method being run in the spawned process.
        :param netConf: NeuralNetConfiguration, adapted to your weights
        :param pathObjWeights: the absolute Path to your weights
        :param input_queue: Queue with Elements to do the Prediction
        :param output_queue: Queue with the Predictions
        :param shut_down_event: Event to shutdown the Process, not working yet
        :return: None
        """
        # configure tensorflow so the model only occupies as much GPU memory as needed
        import tensorflow as tf
        from keras.backend.tensorflow_backend import set_session
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        session = tf.Session(config=config)
        set_session(session)

        net = NeuralNet(netConf, pathObjWeights)
        while not shut_down_event.is_set():
            try:
                # Poll with a timeout so the shutdown event is re-checked;
                # the exception raised is queue.Empty (stdlib `queue` module,
                # which must be imported), not input_queue.Empty.
                task = input_queue.get(timeout=1)
            except queue.Empty:
                continue
            if task == "END":
                break
            output_queue.put(net.predict(task))

    def init_service(self, netConf, ptweights, start=False):
        """
        Initiates a new process; the old one will be shut down and overwritten.
        :param netConf: NeuralNetConfiguration, adapted to your weights
        :param ptweights: the absolute Path to your weights
        :param start: If set to True, will start the service on Initialisation. Default: False
        :return: None
        """

        self._shut_down_event = Event()  # if set, the while-loop closes
        self._input_queue = Queue()  # task queue, the NeuralNet is working on
        self._output_queue = Queue()  # prediction queue, the NeuralNet propagates the detections to

        if hasattr(self, '_process'):
            if self._process.is_alive():
                self.shut_down_service()

        self._process = Process(target=self._start_neuralnet_service,
                                args=(netConf, ptweights, self._input_queue,
                                      self._output_queue,
                                      self._shut_down_event),
                                daemon=True)
        if start:
            self.start_service()

    def start_service(self):
        self._process.start()

    def set_shut_down_event(self):
        """
        Starts to shut down the Service without waiting for it to be done.
        :return: None
        """
        self._shut_down_event.set()
        self.add_prediction_task("END")

    def shut_down_service(self):
        """
        Starts to shut down the Service and waits for it to be done.
        :return: None
        """
        self.set_shut_down_event()
        if self._process.is_alive():
            self._process.join()
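
A hedged helper showing the intended request/response flow of the service above; the configuration object, weights path, and image list are whatever NeuralNet actually expects and are passed in by the caller.

# Hypothetical convenience wrapper around NeuralNetService; the arguments are
# placeholders for whatever NeuralNet expects.
def run_detection(netconf, weights_path, images):
    """Start the service, run one prediction, and shut it down again."""
    service = NeuralNetService(netconf, weights_path, start=True)
    try:
        return service.predict(images)   # blocks until the subprocess answers
    finally:
        service.shut_down_service()
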
Exemplo n.º 57
0
class TaskExecution(Process):

    # create the spotify object
    def make_spotify(self) -> spotipy.Spotify:
        try:
            self.logger.debug("Creating the Spotify instance")
            sp = spotipy.Spotify(auth=spotipy.util.prompt_for_user_token(
                config.USERNAME,
                config.SPOTIFY_SCOPE,
                config.SPOTIFY_CLIENT_ID,
                config.SPOTIFY_CLIENT_SECRET,
                config.SPOTIFY_CALLBACK,
                cache_path=".LetsMakeAPlaylistBot.cache"))
            sp.trace = False

        except Exception as e:
            self.logger.error("Error creating the spotify service... Exiting")
            self.logger.error(e)
            exit(1)

        return sp

    def __init__(self, queue: Queue):
        # initialize the parent object
        super(TaskExecution, self).__init__()

        # create our logger
        self.logger = logging.Logger(__name__)
        self.logger.setLevel(logging.DEBUG)
        file_handler = logging.FileHandler(__name__ + ".log")
        file_handler.setLevel(logging.DEBUG)
        formatter = logging.Formatter(
            "%(asctime)s - %(levelname)s - %(message)s")
        file_handler.setFormatter(formatter)
        self.logger.addHandler(file_handler)

        # create the objects to interact with spotify and reddit
        self.spotify = self.make_spotify()
        self.reddit = praw.Reddit(client_id=config.REDDIT_ID,
                                  client_secret=config.REDDIT_SECRET,
                                  username=config.USERNAME,
                                  password=config.REDDIT_PASSWORD,
                                  user_agent="LetsMakeAPlaylist")

        # create our queue and the exit event
        self.job_queue = queue
        self.exit_event = Event()

    # exit the task execution cleanly
    def clean_exit(self):
        self.logger.debug("Attempting to exit gracefully")
        # put None in the queue in case the queue.get() call is blocking
        self.job_queue.put(None)
        # set the event to exit
        self.exit_event.set()
        self.logger.debug(
            "Waiting to complete the current task and then exiting")
        self.job_queue.put(None)

    def run(self) -> None:
        # while our exit flag is not set
        while not self.exit_event.is_set():
            # get the next job
            job = self.job_queue.get()
            # if the job is None it was put here to exit so continue
            if job is None:
                continue

            # complete the tasks required
            while True:
                try:
                    job[0].do_spotify_task(self.spotify, self.logger)
                    self.logger.debug("Completed a spotify task")
                    job[0].do_reddit_task(self.reddit, self.logger, job[1])
                    break
                except Exception as e:
                    self.logger.error(
                        f"Failed to complete task with error {e}")
                    time.sleep(4)
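
run() expects each queue entry to be a pair: job[0] is an object exposing do_spotify_task(spotify, logger) and do_reddit_task(reddit, logger, extra), and job[1] is that extra argument. Below is a hypothetical producer matching this contract; the task class, track URI, and comment id are placeholders, and valid credentials in config are still required for the executor to start.

# Hypothetical task object and producer for TaskExecution.
from multiprocessing import Queue


class AddTrackTask:
    """Illustrative task: touch Spotify first, then answer on Reddit."""
    def __init__(self, track_uri):
        self.track_uri = track_uri

    def do_spotify_task(self, spotify, logger):
        logger.debug("would add %s to the playlist" % self.track_uri)

    def do_reddit_task(self, reddit, logger, comment_id):
        logger.debug("would reply to comment %s" % comment_id)


if __name__ == "__main__":
    jobs = Queue()
    executor = TaskExecution(jobs)
    executor.start()
    jobs.put((AddTrackTask("spotify:track:example"), "t1_commentid"))
    executor.clean_exit()
    executor.join()
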
Exemplo n.º 58
0
class Generator(Process, GeneratorBase):
    def __init__(self, name, profile):
        Process.__init__(self)
        GeneratorBase.__init__(self, name, profile)
        self.stopit = Event()
        self.stopped = Event()

    def send_recv(self, pkt, timeout=2):
        # Should wait for the ICMP reply when sending ICMP request.
        # So using scapy's "sr1".
        log.debug("Sending: %s", repr(pkt))
        proto = self.profile.stream.get_l4_proto()
        if proto == "icmp" or proto == "icmpv6":
            conf.promisc = 1
            p = sr1(pkt, timeout=timeout)
            if p:
                log.debug("Received: %s", repr(pkt))
                self.recv_count += 1
        else:
            conf.promisc = 0
            send(pkt)
        self.count += 1
        self.update_result("Sent=%s\nReceived=%s" %
                           (self.count, self.recv_count))

    def _standard_traffic(self):
        for i in range(self.profile.count):
            self.send_recv(self.pkt)
        self.stopped.set()

    def _continuous_traffic(self):
        while not self.stopit.is_set():
            self.send_recv(self.pkt)
        self.stopped.set()

    def _burst_traffic(self):
        for i in range(self.profile.count):
            for j in range(self.profile.burst_count):
                self.send_recv(self.pkt)
                sleep(self.profile.burst_interval)
        self.stopped.set()

    def _continuous_sport_range_traffic(self):
        self.pkts = self.creater.pkts
        while not self.stopit.is_set():
            sendpfast(self.pkts, pps=self.profile.pps)
            self.count += len(self.pkts)
            self.update_result("Sent=%s\nReceived=%s" %
                               (self.count, self.recv_count))
            if self.stopit.is_set():
                break
        self.stopped.set()

    def _start(self):
        # Preserve the order of the if-elif, because the Profiles are
        # inherited from StandardProfile, So all the profiles will be
        # instance of StandardProfile
        if isinstance(self.profile, ContinuousSportRange):
            self._continuous_sport_range_traffic()
        elif isinstance(self.profile, ContinuousProfile):
            self._continuous_traffic()
        elif isinstance(self.profile, BurstProfile):
            self._burst_traffic()
        elif isinstance(self.profile, StandardProfile):
            self._standard_traffic()

    def run(self):
        try:
            self._start()
        except Exception as err:
            log.warn(traceback.format_exc())
        finally:
            log.info("Total packets sent: %s", self.count)
            log.info("Total packets received: %s", self.recv_count)
            self.update_result("Sent=%s\nReceived=%s" %
                               (self.count, self.recv_count))

    def stop(self):
        if not self.is_alive():
            return

        if (isinstance(self.profile, ContinuousProfile)
                or isinstance(self.profile, ContinuousSportRange)):
            self.stopit.set()

        while (self.is_alive() and not self.stopped.is_set()):
            continue
        if self.is_alive():
            self.terminate()
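
A hedged driver for the Generator above; the profile argument is assumed to be one of the module's profile objects (StandardProfile, BurstProfile, ContinuousProfile, ContinuousSportRange), which are not shown here.

# Hypothetical driver for the traffic Generator.
from time import sleep


def run_for(profile, seconds, name="traffic-gen"):
    """Start the generator, let it run for a while, then stop it cleanly."""
    gen = Generator(name, profile)
    gen.start()                 # run() sends packets in a separate process
    sleep(seconds)
    gen.stop()                  # sets stopit and waits for the stopped flag
    gen.join()
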
Exemplo n.º 59
0
class MinerWork(Process):
    def __init__(self, thr_id, work_submit_queue, g_work, hash_report,
                 cpu_priority_level):
        Process.__init__(self)
        self._cur_job_id = None
        self._hash_rate = 0.0
        self._thr_id = thr_id
        self._work_submit_queue = work_submit_queue
        self._g_work = g_work
        self._hash_report_queue = hash_report
        self.exit = Event()

        _p = psutil.Process(self.pid)
        _cpu_affinity = [CPU_COUNT - (thr_id % CPU_COUNT) - 1]
        if sys.platform == "win32":
            _p.cpu_affinity(_cpu_affinity)
        #_p.nice(cpu_priority_level)

    def run(self):
        _total_hashes = 0

        blob_bin = None
        nonce = 1
        max_nonce = target = login_id = 0
        #end_nonce = MAX_INT - 0x20
        end_nonce = 0
        is_cryptolite = 0  # whether the cryptonight-lite algo is used
        #         max_int32 = 2**32        # =4294967296

        while not self.exit.is_set():
            if not 'job_id' in self._g_work or self._g_work['job_id'] is None:
                self._hash_rate = 0.
                self._shareHashRate()
                time.sleep(.1)
                continue

            if self._g_work['job_id'] != self._cur_job_id:
                self._cur_job_id = self._g_work['job_id']
                nonce = self._g_work['nonce']
                blob_bin = self._g_work['blob_bin']
                target = self._g_work['target']
                login_id = self._g_work['login_id']
                is_cryptolite = self._g_work['is_cryptolite']
                end_nonce = MAX_INT / self._g_work['num_thrs'] * (
                    self._thr_id + 1) - 0x20
                nonce += MAX_INT / self._g_work['num_thrs'] * self._thr_id
                """ randomize nonce start"""
                if settings.OPT_RANDOMIZE:
                    offset = int(
                        settings.OPT_SCANTIME * self._hash_rate
                    ) if self._hash_rate > 0 else 64 * settings.OPT_SCANTIME
                    nonce += random.randint(
                        0, MAX_INT / self._g_work['num_thrs'] - offset)
                if nonce > MAX_INT - 0x20:
                    nonce = end_nonce

            max64 = int(settings.OPT_SCANTIME *
                        self._hash_rate) if self._hash_rate > 0 else 64
            if nonce + max64 > end_nonce:
                max_nonce = end_nonce
            else:
                max_nonce = nonce + max64

            if max_nonce > MAX_INT:
                max_nonce = MAX_INT
            """ start _hash scan """
            total_hashes_done = 0
            _hashes_done = 0
            start = _start = time.time()
            while nonce <= max_nonce and not self.exit.is_set():
                nonce_bin = struct.pack("<I", nonce)
                blob_bin = blob_bin[:39] + nonce_bin + blob_bin[43:]

                if is_cryptolite:
                    _hash = cryptolite_hash(blob_bin, HAS_AES_NI)
                else:
                    _hash = cryptonite_hash(blob_bin, HAS_AES_NI)

                nonce += 1
                _hashes_done += 1
                total_hashes_done += 1
                """ calculate _hash rate"""
                if _hashes_done >= self._hash_rate / 2:
                    #                 if _hashes_done >= 10:
                    elapsed = time.time() - _start
                    if elapsed > 0:
                        self._hash_rate = _hashes_done / elapsed
                        """ share _hash rate """
                        self._shareHashRate()
                        log(
                            'CPU #%d: %.2f H/s' %
                            (self._thr_id, self._hash_rate), LEVEL_DEBUG)
                        _start = time.time()
                        _hashes_done = 0

                if struct.unpack("<I", _hash[28:])[0] < target:
                    """ Yes, hash found! """
                    params = dict(id=login_id,
                                  job_id=self._cur_job_id,
                                  nonce=hexlify(nonce_bin),
                                  result=hexlify(_hash))
                    self._work_submit_queue.put({
                        'method': 'submit',
                        'params': params
                    })
                    break
                """ if there is a new work, break scan """
                if self._g_work['job_id'] != self._cur_job_id:
                    break

            elapsed = time.time() - start
            self._hash_rate = total_hashes_done / elapsed if elapsed > 0 else 0.
            """ share _hash rate """
            self._shareHashRate()
            log('CPU #%d: %.2f H/s' % (self._thr_id, self._hash_rate),
                LEVEL_DEBUG)
            """ if idle: """
            if total_hashes_done == 0:
                time.sleep(.1)

        ## Set hash_rate to 0.0 before exit
        self._hash_rate = 0.
        self._shareHashRate()

    def _shareHashRate(self):
        self._hash_report_queue.update({'%d' % self._thr_id: self._hash_rate})

    def shutdown(self):
        log("Miner thread# %d shutdown initiated" % self._thr_id, LEVEL_DEBUG)
        self.exit.set()

    def set_cpu_priority(self, cpu_priority_level):
        _p = psutil.Process(self.pid)
        _p.nice(cpu_priority_level)

    def show_priority(self):
        _p = psutil.Process(self.pid)
        print "PID", _p.pid, "Priority", _p.nice()
Exemplo n.º 60
0
class STT(object):
    """Speech To Text processing class
        
        This is a multithreaded wrapper for the pocketsphinx audio N-Gram processing class.
        Create a new STT object for every client that connects to the websocket

        Attributes:
            
    """
    def __init__(self):
        self._is_ready = None
        self._subprocess_callback = None
        self._loaded_model = False
        self._p_out, self._p_in = Pipe()  # Create a new multiprocessing Pipe pair
        self._shutdown_event = Event()  # Create an event to handle the STT shutdown
        self._process = Process(target=self.__worker,
                                args=((self._p_out, self._p_in),
                                      log))  # Create the subprocess fork
        self._process.start()  # Start the subprocess fork

        self._subprocess_t = Thread(target=self.__handle_subprocess)
        self._subprocess_t.setDaemon(True)
        self._subprocess_t.start()

    def __worker(self, pipe, l_log):
        """The core of the STT program, this is the multiprocessed part

        Note:
            Multiprocessing will require a pipe between the parent and child subprocess.
            Since this is the case, the worker subprocess cannot access non-shared variables

        """

        l_log.debug("STT worker started")

        audio_processor = AudioProcessor()  # Create a new audio processing object
        text_processor = TextProcessor()  # Remember that we can't load the text processor nltk model until the nltk model is set from the client language
        config = Decoder.default_config()  # Create a new pocketsphinx decoder with the default configuration, which is English
        decoder = None
        nltk_model = None
        mutex_flags = {"keyphrases": {"use": False}}
        shutdown_flags = {"shutdown": False, "decoder": None}

        def send_json(pipe, to_send):
            """Internal worker method to send a json through the parent socket

            Arguments:
                pipe (:obj: socket): The response pipe to send to the parent process
                to_send (:obj: dict): A dictionary to be sent to the parent socket

            """
            try:
                ret = self.__send_buffered(
                    pipe, to_send
                )  # Send the message passed by argument back to the parent process
                if not ret[0]:
                    l_log.error(
                        "Failed to send buffered message to the parent process! (err: %s)"
                        % ret[1])
            except Exception as err:
                l_log.error("Failed to send json! (err: %s)" % str(err))

        def send_error(pipe, error):
            """Internal worker method to send a json error through the parent socket

            Arguments:
                pipe (:obj: socket): The response pipe to send to the parent process
                error (str): The string error message to send

            """
            send_json(pipe, {"error": error})

        def load_models(pipe, config, models):
            """Internal worker method to load the language model

            Note:
                Some languages take a long time to load. English is by far
                the fastest language to be loaded as a model.
            
            Arguments:
                pipe (:obj: socket): The response pipe to send to the parent process
                models (dict): The language and nltk models developed by the parent process
           
            Returns: (Decoder)
                The STT decoder object and the nltk model

            """

            language_model = models["language_model"]
            nltk_model = models["nltk_model"]

            if False in [
                    language_model.is_valid_model(),
                    nltk_model.is_valid_model()
            ]:
                l_log.error("The language model %s is invalid!" %
                            str(language_model.name))
                send_error(pipe, "Failed loading language model!")
                return

            # Load the model configurations into pocketsphinx
            config.set_string('-hmm', str(language_model.hmm))
            config.set_string('-lm', str(language_model.lm))
            config.set_string('-dict', str(language_model.dict))
            decoder = Decoder(config)

            send_json(
                pipe,
                {"success": True})  # Send a success message to the client

            l_log.debug("Set the language model to %s" %
                        str(language_model.name))

            return decoder, nltk_model  # Return the new decoder and nltk model

        def process_text(pipe, text, is_final, args):
            """Internal worker method to process the Speech To Text phrase

            Arguments:
                pipe (:obj: socket): The response pipe to send to the parent process
                text (str): The spoken text to further process
                is_final (bool): True if the text being processed is the final result, else it is a partial result
                args (dict): Any other flags specifically required for a final or partial speech result
            """

            generate_keyphrases = mutex_flags["keyphrases"]["use"]
            keyphrases = []

            if generate_keyphrases:
                text_processor.generate_keyphrases(
                    text)  # Generate keyphrases from the given text
                keyphrases_list = text_processor.get_keyphrases()

                for keyphrase in keyphrases_list:
                    to_append_keyphrase = {
                        "score": keyphrase[0],
                        "keyphrase": keyphrase[1]
                    }
                    keyphrases.append(to_append_keyphrase)
            else:
                keyphrases = text  # Don't do any processing and just pass the text into the keyphrases

            # Generate the json to be sent back to the client
            hypothesis_results = args
            hypothesis_results["keyphrases"] = generate_keyphrases
            if is_final:
                hypothesis_results["hypothesis"] = keyphrases
            else:
                hypothesis_results["partial_hypothesis"] = keyphrases

            print(hypothesis_results)

            # Send the results back to the client
            send_json(pipe, hypothesis_results)

        def start_audio(pipe, decoder, args):
            """Internal worker method to start the audio processing chunk sequence

            Note:
                This must be called before the process_audio method or the STT engine will not process the audio chunks

            Arguments:
                pipe (:obj: socket): The response pipe to send to the parent process
                decoder (Decoder): The pocketsphinx decoder to control the STT engine
                args (dict): All of the available arguments passed by the parent process

            """

            if decoder is None:
                l_log.error("Language model is not loaded")
                send_error(pipe, "Language model not loaded!")
                send_json(pipe, {"decoder": False})
                return

            l_log.debug("Starting the audio processing...")

            decoder.start_utt()  # Start the pocketsphinx listener

            # Tell the client that the decoder has successfully been loaded
            send_json(pipe, {"decoder": True})

        def process_audio(pipe, decoder, args):
            """Internal worker method to process an audio chunk

            Note:
                The audio chunk is expected to be in base64 format

            Arguments:
                pipe (:obj: socket): The response pipe to send to the parent process
                decoder (Decoder): The pocketsphinx decoder to control the STT engine
                args (dict): All of the available arguments passed by the parent process

            """
            if decoder is None:
                l_log.error("Language model is not loaded")
                send_error(pipe, "Language model not loaded!")
                return

            l_log.debug("Processing audio chunk!")

            audio_chunk = args["audio"]  # Retrieve the audio data
            processed_wav = audio_processor.process_chunk(
                audio_chunk)  # Process the base64 wrapped audio data

            l_log.debug("Recognizing speech...")

            decoder.process_raw(
                processed_wav, False,
                False)  # Process the audio chunk through the STT engine

            hypothesis = decoder.hyp()  # Get pocketsphinx's hypothesis

            # Send back the results of the decoding
            if hypothesis is None:
                l_log.debug("Silence detected")
                send_json(pipe, {
                    "partial_silence": True,
                    "partial_hypothesis": None
                })
            else:
                hypothesis_results = {
                    "partial_silence":
                    False if len(hypothesis.hypstr) > 0 else True,
                }

                l_log.debug("Partial speech detected: %s" %
                            str(hypothesis.hypstr))
                process_text(pipe, hypothesis.hypstr, False,
                             hypothesis_results)

            l_log.debug("Done decoding speech from audio chunk!")

        def stop_audio(pipe, decoder, args):
            """Internal worker method to stop the audio processing chunk sequence

            Note:
                This must be called after the process_audio method or the STT engine will continue to listen for audio chunks

            Arguments:
                pipe (:obj: socket): The response pipe to send to the parent process
                decoder (Decoder): The pocketsphinx decoder to control the STT engine
                args (dict): All of the available arguments passed by the parent process

            """

            if decoder is None:
                l_log.error("Language model is not loaded")
                send_error(pipe, "Language model not loaded!")
                send_json({"decoder": False})
                return

            l_log.debug("Stopping the audio processing...")

            decoder.end_utt()  # Stop the pocketsphinx listener

            l_log.debug("Done recognizing speech!")

            hypothesis = decoder.hyp()  # Get pocketsphinx's hypothesis
            logmath = decoder.get_logmath()

            # Send back the results of the decoding
            if hypothesis is None:
                l_log.debug("Silence detected")
                send_json(pipe, {"silence": True, "hypothesis": None})
            else:
                hypothesis_results = {
                    "silence": False if len(hypothesis.hypstr) > 0 else True,
                    "score": hypothesis.best_score,
                    "confidence": logmath.exp(hypothesis.prob)
                }

                l_log.debug("Speech detected: %s" % str(hypothesis.hypstr))
                process_text(pipe, hypothesis.hypstr, True, hypothesis_results)

        def shutdown_thread(self, l_log):
            """Worker method to handle the checking of a shutdown call

            Note:
                To reduce overhead, this thread will only be called every 100 milliseconds

            """
            while not shutdown_flags["shutdown"]:
                try:
                    if self._shutdown_event.is_set():
                        l_log.debug("Shutting down worker thread!")
                        shutdown_flags["shutdown"] = True  # Exit the main loop
                        if shutdown_flags["decoder"] is not None:
                            try:
                                shutdown_flags["decoder"].end_utt()
                            except Exception as err:
                                l_log.debug(
                                    "STT decoder end_utt() raised an error: %s"
                                    % str(err))
                        else:
                            l_log.warning(
                                "The decoder object is already None!")

                        break
                    sleep(0.1)
                except Exception as err:
                    l_log.error(
                        "Failed shutting down worker thread! (err: %s)" %
                        str(err))

        shutdown_t = Thread(target=shutdown_thread, args=(
            self,
            l_log,
        ))
        shutdown_t.daemon = True
        shutdown_t.start()

        p_out, p_in = pipe
        while not shutdown_flags["shutdown"]:
            try:
                try:
                    command = self.__get_buffered(
                        p_out)  # Wait for a command from the parent process
                    if "set_models" in command[
                            "exec"]:  # Check to see if our command is to
                        decoder, nltk_model = load_models(
                            p_out, config, command["args"])
                        text_processor.set_nltk_model(
                            nltk_model)  # Set the text processor nltk model
                        shutdown_flags["decoder"] = decoder
                    elif "start_audio" in command["exec"]:
                        start_audio(p_out, decoder, command["args"])
                    elif "process_audio" in command["exec"]:
                        process_audio(p_out, decoder, command["args"])
                    elif "stop_audio" in command["exec"]:
                        stop_audio(p_out, decoder, command["args"])
                    elif "set_keyphrases" in command["exec"]:
                        mutex_flags["keyphrases"] = command["args"]
                    else:
                        l_log.error("Invalid command %s" % str(command))
                        send_error(p_out, "Invalid command!")
                except (EOFError, IOError) as err:
                    continue
            except Exception as err:
                l_log.error(
                    "Failed receiving command from parent process (id: %d) (err: %s)"
                    % (current_process().pid, str(err)))

    def __send_to_worker(self, t_exec, to_send):
        """Private method to handle sending to the subprocess worker

        Arguments:
            t_exec (str): The subprocess execution method (ex: set_models or process_audio)
            to_send (:obj: dict): The dictionary arguments to send to the subprocess worker

        """
        ret = self.__send_buffered(self._p_in, {
            "exec": t_exec,
            "args": to_send
        })
        if not ret[0]:
            log.error("Failed to send buffered! (err: %s)" % ret[1])

    def __get_buffered(self, pipe):
        """Private method to handle buffered recieving from a pipe

        Note:
            This concept should work on most sockets

        Arguments:
            pipe (Pipe): The pipe to receive from

        Returns: (obj)
            The decoded jsonpickle object

        """

        raw_command = ""
        while True:  # Load the message into a buffer
            try:
                raw_command += pipe.recv()  # Wait for the next chunk from the other end of the pipe
                if "<!EOF!>" in raw_command:
                    raw_command = raw_command.replace("<!EOF!>", "")
                    break
            except (EOFError, IOError) as err:
                sleep(0.01)
        return jsonpickle.decode(raw_command)  # Decode the object

    def __send_buffered(self, pipe, to_send):
        """Private method to handle buffered sending to a pipe

        Note:
            This concept should work on most sockets

        Arguments:
            pipe (Pipe): The pipe to send to
            to_send (obj): Any object you wish to send through the pipe
        """
        def send_pipe(pipe, chunk):
            timeout = 0  # Broken pipe detection
            while True:
                try:
                    pipe.send(chunk)
                    return True
                except (EOFError, IOError) as err:
                    timeout += 1
                    if timeout > 1000:  # Don't attempt to send to a broken pipe more than 1000 times
                        return False
                    sleep(0.0005)  # Wait 500 microseconds before retrying

        try:
            pickled = jsonpickle.encode(to_send)  # Encode the object as a JSON string
            # Split the string into chunks of at most 3000 characters
            chunks = re.findall(".{1,3000}", pickled)
            for chunk in chunks:
                if not send_pipe(pipe, chunk):  # Send each chunk individually
                    return (False, "Chunk failed to send!")
            # Send the EOF sentinel to mark the end of the message
            if not send_pipe(pipe, "<!EOF!>"):
                return (False, "EOF failed to send")
            return (True, "")
        except Exception as err:
            return (False, str(err))
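
# Illustrative sketch (not part of the original example): the two methods above
# frame a message over a multiprocessing Pipe by splitting the jsonpickle string
# into chunks of at most 3000 characters and terminating the stream with an
# "<!EOF!>" sentinel. The standalone helper below reproduces that round trip in
# isolation; plain json, the chunk size and the demo payload are assumptions
# used for illustration only.
import json
import re
from multiprocessing import Pipe


def demo_framed_roundtrip():
    sender, receiver = Pipe()  # duplex pipe, one end per "process"
    payload = {"exec": "process_audio", "args": {"audio": "aGVsbG8="}}
    encoded = json.dumps(payload)
    # Send the message in fixed-size chunks, then the end-of-message sentinel
    for chunk in re.findall(".{1,3000}", encoded, re.DOTALL):
        sender.send(chunk)
    sender.send("<!EOF!>")
    # Receive until the sentinel arrives, then decode the reassembled buffer
    buffered = ""
    while "<!EOF!>" not in buffered:
        buffered += receiver.recv()
    return json.loads(buffered.replace("<!EOF!>", ""))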

    def __handle_subprocess(self):
        """Private method to handle the return callback from the subprocess

        Note:
            This should run in its own thread
        """

        while True:
            try:
                try:
                    command = self.__get_buffered(self._p_in)
                    if self._subprocess_callback is not None:
                        self._subprocess_callback(command)
                    else:
                        log.warning("Subprocess callback is None!")
                except (EOFError, IOError) as err:
                    sleep(0.01)  # Wait 10 milliseconds
                    continue
            except Exception as err:
                log.error(
                    "Failed receiving command from worker subprocess (err: %s)"
                    % str(err))

    def set_subprocess_callback(self, callback):
        """Method to set the callback of the child process

        Note:
            This function will be called within the parent process thread
        
        Arguments:
            callback (:obj: method): The callback method to handle the subprocess calling

        """
        self._subprocess_callback = callback

    def set_models(self, language_model, nltk_model):
        """Method to set the STT object's language model

        Note:
            This will reload the entire language model and might take some time
        
        Arguments:
            language_model (LanguageModel): The loaded language model to be processed for the STT engine
            nltk_model (NLTKModel): The loaded nltk model to be processed for the text processing object
        """
        self.__send_to_worker("set_models", {
            "language_model": language_model,
            "nltk_model": nltk_model
        })

    def process_audio_chunk(self, audio_chunk):
        """Method to process an audio chunk

        Note:
            The audio chunk is expected to be in base64 format

        Arguments:
            audio_chunk (dict): A dict containing the base64-wrapped audio data under the "audio" key
        """
        self.__send_to_worker("process_audio", audio_chunk)

    def start_audio_proc(self):
        """Method to start the audio processing

        Note:
            This must be called before the process_audio_chunk method

        """
        self.__send_to_worker("start_audio", {})

    def stop_audio_proc(self):
        """Method to stop the audio processing

        Note:
            This must be called after the series of process_audio_chunk method calls

        """
        self.__send_to_worker("stop_audio", {})

    def set_keyphrases(self, keyphrases):
        """Method to set the keyphrases flag
        
        Arguments:
            keyphrases (dict): The keyphrases flags
        """
        self.__send_to_worker("set_keyphrases", keyphrases)

    def shutdown(self):
        """Method to shutdown and cleanup the STT engine object

        Note:
            The shutdown does not happen immediately, and this call may stall the calling
            process for a few hundred milliseconds
        """
        self._shutdown_event.set()  # Signal the worker subprocess to shut down

        def terminate_soon(self):
            try:
                sleep(1)  # Give the subprocess a second to clean up after itself
                self._process.terminate()  # Destroy the entire subprocess
            except Exception as err:
                log.error("Failed terminating worker subprocess! (err: %s)" %
                          str(err))

        # Wait for the subprocess to retrieve the shutdown event and then destroy the subprocess
        terminate_soon_t = Thread(target=terminate_soon, args=(self, ))
        terminate_soon_t.daemon = True
        terminate_soon_t.start()
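
# Illustrative sketch (not part of the original example): results come back
# asynchronously through the callback registered with set_subprocess_callback,
# so a typical client prints or forwards them there and calls shutdown() once
# the stream is finished. The `on_result` name and the key checks are
# assumptions based on the worker code above.
def on_result(result):
    if result.get("hypothesis"):
        print("Final hypothesis:", result["hypothesis"])
    elif result.get("partial_hypothesis"):
        print("Partial hypothesis:", result["partial_hypothesis"])

# Possible usage, assuming `stt` is an instance of the class above:
#   stt.set_subprocess_callback(on_result)
#   ...stream audio as sketched above...
#   stt.shutdown()  # signals the worker, then terminates it about a second later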