Example #1
class Transcoder:
    def __init__(self):
        self.stopping = Event()

    def stop(self):
        logger.debug("Preventing new transcoding processes.")
        self.stopping.set()

    def transcode(self, path, format='mp3', bitrate=False):
        if self.stopping.is_set():
            return
        try:
            stop = Event()
            start_time = time.time()
            parent_conn, child_conn = Pipe()
            process = Process(target=transcode_process,
                    args=(child_conn, path, stop, format, bitrate))
            process.start()
            while not (self.stopping.is_set() or stop.is_set()):
                data = parent_conn.recv()
                if not data:
                    break
                yield data
            logger.debug("Transcoded %s in %0.2f seconds." % (path.encode(cfg['ENCODING']), time.time() - start_time))
        except GeneratorExit:
            stop.set()
            logger.debug("User canceled the request during transcoding.")
        except Exception:
            stop.set()
            logger.warning("Some type of error occurred during transcoding.")
        finally:
            parent_conn.close()
            process.join()
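Since transcode() above is a generator, a caller drives it with a for loop; if the caller stops consuming and the generator is closed, GeneratorExit is raised inside the try block, which sets the child's stop event. A minimal consumption sketch, assuming a hypothetical send_to_client() sink and an example path:

transcoder = Transcoder()
for chunk in transcoder.transcode('/music/song.flac', format='mp3', bitrate=192):
    send_to_client(chunk)  # hypothetical sink; closing the generator early
                           # (e.g. on client disconnect) stops the child process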
def single_output(stop_event: Event):
    print("single output get queue:")
    sum_limit = 1000
    counter = 0
    manager, output_q = get_queue_client(QueueManager.MachineSettingCrawler, QueueManager.Method_Whois_Input)
    while not stop_event.is_set():
        try:
            while not output_q.empty() and not stop_event.is_set():
                result = output_q.get(False)
                counter += 1
                if isinstance(result, list):
                    for item in result:
                        print("server queue output:", str(item), "count:", counter)
                else:
                    # print(result)
                    pass
                if counter / sum_limit > 0 and counter % sum_limit == 0:
                    print("current output count is:", counter)
                time.sleep(0.000001)
        except Exception as ex:
            pass
            # manager, output_q = get_queue_client(QueueManager.MachineSettingCrawler, QueueManager.Method_Whois_Output)
        finally:
            print("going to sleep.")
            time.sleep(1)
Example #3
class Worker(Process):
    def __init__(self, buffer, reorder_buffer, job):
        Process.__init__(self)
        self.buffer = buffer
        self.reorder_buffer = reorder_buffer
        self.job = job
        self.event = Event()

    def run(self):
        self.event.set()
        while self.event.is_set():
            try:
                block_number, data = self.buffer.get()
            except IOError as e:
                if e.errno == errno.EINTR:
                    data = EOF

            if data == EOF:
                self.stop()
                break
            worked_data = self.job(data)

            while self.event.is_set():
                try:
                    self.reorder_buffer.put(block_number, worked_data)
                    break
                except IndexError:
                    # Block number bigger than expected;
                    # wait until the ReorderBuffer starts
                    # processing blocks in this range
                    time.sleep(0.1)
                except IOError as e:
                    if e.errno == errno.EINTR:
                        self.stop()
Example #4
class Cluster(object):
    def __init__(self, broker=None):
        self.broker = broker or get_broker()
        self.sentinel = None
        self.stop_event = None
        self.start_event = None
        self.pid = current_process().pid
        self.host = socket.gethostname()
        self.timeout = Conf.TIMEOUT
        signal.signal(signal.SIGTERM, self.sig_handler)
        signal.signal(signal.SIGINT, self.sig_handler)

    def start(self):
        # Start Sentinel
        self.stop_event = Event()
        self.start_event = Event()
        self.sentinel = Process(target=Sentinel, args=(self.stop_event, self.start_event, self.broker, self.timeout))
        self.sentinel.start()
        logger.info(_("Q Cluster-{} starting.").format(self.pid))
        while not self.start_event.is_set():
            sleep(0.1)
        return self.pid

    def stop(self):
        if not self.sentinel.is_alive():
            return False
        logger.info(_("Q Cluster-{} stopping.").format(self.pid))
        self.stop_event.set()
        self.sentinel.join()
        logger.info(_("Q Cluster-{} has stopped.").format(self.pid))
        self.start_event = None
        self.stop_event = None
        return True

    def sig_handler(self, signum, frame):
        logger.debug(_("{} got signal {}").format(current_process().name, Conf.SIGNAL_NAMES.get(signum, "UNKNOWN")))
        self.stop()

    @property
    def stat(self):
        if self.sentinel:
            return Stat.get(self.pid)
        return Status(self.pid)

    @property
    def is_starting(self):
        return self.stop_event and self.start_event and not self.start_event.is_set()

    @property
    def is_running(self):
        return self.stop_event and self.start_event and self.start_event.is_set()

    @property
    def is_stopping(self):
        return self.stop_event and self.start_event and self.start_event.is_set() and self.stop_event.is_set()

    @property
    def has_stopped(self):
        return self.start_event is None and self.stop_event is None and self.sentinel
class CheckerProcess(Process):
    def __init__(self, team_id, checks, db_host, db_port, db_name, check_delay):
        super(CheckerProcess, self).__init__()
        self.team_id = team_id
        self.db = MongoDBWrapper(db_host, int(db_port), db_name)
        self.check_delay = int(check_delay)
        self.shutdown_event = Event()
        self.checks = []
        for check in checks:
            check_db_data = self.db.get_specific_check(check.check_id, check.check_class.check_type)
            if len(check_db_data) > 0:
                check_data = check_db_data[0]
                if issubclass(check.check_class, InjectCheck):
                    check_obj = check.check_class(check_data['machine'], team_id, db_host, db_port, db_name, check_data['time_to_check'])
                    self.checks.append(Checker(check.check_id, check_obj, check_obj.time_to_run))
                elif issubclass(check.check_class, (ServiceCheck, AttackerCheck)):
                    check_obj = check.check_class(check_data['machine'], team_id, db_host, db_port, db_name)
                    self.checks.append(Checker(check.check_id, check_obj, datetime.now()))
        random.shuffle(self.checks, random.random)

    def run(self):
        while not self.shutdown_event.is_set():
            self.run_checks()

    def run_checks(self):
        indices_to_remove = []
        for i in range(0, len(self.checks)):
            check = self.checks[i]
            check_obj = copy(check.object)
            now = datetime.now()
            print(check.time_to_run, '<', now, '=', check.time_to_run < now)
            if check.time_to_run < now:
                check_process = Process(target=check_obj.run_check)
                check_process.start()
                check_process.join(check_obj.timeout)
                if check_process.is_alive():
                    check_process.terminate()
                score = check_obj.score
                if issubclass(type(check_obj), InjectCheck):
                    self.db.complete_inject_check(check.id, self.team_id, datetime.now(), score)
                    #self.checks[:] = [obj for obj in self.checks if not obj == check]
                    indices_to_remove.append(i)
                elif issubclass(type(check_obj), ServiceCheck):
                    self.db.complete_service_check(check.id, self.team_id, datetime.now(), score)
                    check.timestamp = datetime.now() + timedelta(seconds=self.check_delay)
                elif issubclass(type(check_obj), AttackerCheck):
                    self.db.complete_attacker_check(check.id, self.team_id, datetime.now(), score)
                    check.timestamp = datetime.now() + timedelta(seconds=self.check_delay)
                self.db.calculate_scores_for_team(self.team_id)
            if self.shutdown_event.is_set():
                break
        indices_to_remove.sort(reverse=True)
        for i in indices_to_remove:
            del self.checks[i]
Example #6
class SharedEvent:
    def __init__(self, timeout = 0.7):
        self._rest = Event()
        self._exit = Event()
        self.timeout = timeout
        self._rest.set()

    def shutdown(self):
        while self._rest.is_set():
            time.sleep(self.timeout)
        return self._exit.is_set()
Example #7
class ParserThread(Process):
    """ Class writes simulated data to navigation system and
    receives results """

    def __init__(self, device, baudrate, q):
        Process.__init__(self)
        self.device = device
        self.baudrate = baudrate
        self.q = q
        self.__stop = Event()

    def stop(self):
        self.__stop.set()

    def mav_dispatch(self, m):
        try:
            self.q.put_nowait(m)
        except Full:
            pass

    def run(self):
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        print("*** parser process started. Pid:", os.getpid())
        master = mavutil.mavlink_connection(self.device, self.baudrate, dialect="lapwing")

        while not self.__stop.is_set():
            m = master.recv_msg()
            if (m is not None):
                dbg_print(m)
                self.mav_dispatch(m)

        master.close()
        print("*** parser process stopped")
Example #8
def test_recycle(broker, monkeypatch):
    # set up the Sentinel
    broker.list_key = 'test_recycle_test:q'
    async('django_q.tests.tasks.multiply', 2, 2, broker=broker)
    async('django_q.tests.tasks.multiply', 2, 2, broker=broker)
    async('django_q.tests.tasks.multiply', 2, 2, broker=broker)
    start_event = Event()
    stop_event = Event()
    # override settings
    monkeypatch.setattr(Conf, 'RECYCLE', 2)
    monkeypatch.setattr(Conf, 'WORKERS', 1)
    # set a timer to stop the Sentinel
    threading.Timer(3, stop_event.set).start()
    s = Sentinel(stop_event, start_event, broker=broker)
    assert start_event.is_set()
    assert s.status() == Conf.STOPPED
    assert s.reincarnations == 1
    async('django_q.tests.tasks.multiply', 2, 2, broker=broker)
    async('django_q.tests.tasks.multiply', 2, 2, broker=broker)
    task_queue = Queue()
    result_queue = Queue()
    # push two tasks
    pusher(task_queue, stop_event, broker=broker)
    pusher(task_queue, stop_event, broker=broker)
    # worker should exit on recycle
    worker(task_queue, result_queue, Value('f', -1))
    # check if the work has been done
    assert result_queue.qsize() == 2
    # save_limit test
    monkeypatch.setattr(Conf, 'SAVE_LIMIT', 1)
    result_queue.put('STOP')
    # run monitor
    monitor(result_queue)
    assert Success.objects.count() == Conf.SAVE_LIMIT
    broker.delete_queue()
Example #9
def pipelineDaemon(pipeline, returnEvent, options=None, programName=None):
    """Launches Pyro server and (if specified by options) pipeline executors"""
    
    #check for valid pipeline 
    if pipeline.runnable.empty():
        print("Pipeline has no runnable stages. Exiting...")
        sys.exit()

    if options.urifile is None:
        options.urifile = os.path.abspath(os.curdir + "/" + "uri")
    
    e = Event()
    process = Process(target=launchServer, args=(pipeline,options,e,))
    process.start()
    e.wait()
    if options.num_exec != 0:
        processes = [Process(target=launchPipelineExecutor, args=(options,programName,)) for i in range(options.num_exec)]
        for p in processes:
            p.start()
        for p in processes:
            p.join()
    
    #Return to calling code if pipeline has no more runnable stages:
    #Event will be cleared once clients are unregistered. 
    while e.is_set():
        time.sleep(5)
    returnEvent.set()
Example #10
class Updater(Process):

    def __init__(self, maxsize=15):
        Process.__init__(self)
        #self.queue = Queue(maxsize)
        self.queue = Queue()
        self.queue_lock = Lock()
        self._exit = Event()

    def run(self):
        while not self._exit.is_set():
            #with self.queue_lock:
            self.queue.put(self.receive())
            #self.queue.put_nowait(self.receive())
            #if self.queue.full():
            #    try:
            #        self.queue.get_nowait()
            #    except:
            #        pass

    def stop(self):
        self._exit.set()
        # This leaves the process hanging on Windows
        #self.join(STOP_TIMEOUT)
        if self.is_alive():
            #TODO make a nicer warning
            print('Terminating updater:', self)
            self.terminate()

    def receive(self):
        raise NotImplementedError
Example #11
class MistProcess(Process):

    def __init__(self, gpio, sleep=1, name='MistProcess'):
        Process.__init__(self, name=name)
        self.logger = multiprocessing.get_logger()
        self.event = Event()
        self.name = name
        self.gpio = gpio
        self.sleep = sleep
        self.mist = mraa.Gpio(self.gpio)
        self.mist.dir(mraa.DIR_OUT)

    def _mist_on(self):
        self.logger.debug('Mist on')
        self.mist.write(1)

    def _mist_off(self):
        self.logger.debug('Mist off')
        if self.mist:
            self.mist.write(0)

    def run(self):
        self.event.set()
        self.logger.debug('PID: %d' % multiprocessing.current_process().pid)

        while self.event.is_set():
            self._mist_on()
            time.sleep(self.sleep)

    def stop(self):
        self.logger.debug('Process {} will halt.'.format(self.name))
        self.event.clear()
        self._mist_off()
Example #12
    def execute_action(self, action):
        event = Event()
        queue = Queue()
        proc = Process(
            target=execute_action_proc,
            args=(self.execute, action, event, queue))
        proc.start()

        # Send heartbeat.
        heartbeat_retry = 0
        while not event.is_set():
            event.wait(config.ACTIVITY_HEARTBEAT_INTERVAL)
            try:
                res = self.heartbeat(self.task_token)
                if res['cancelRequested']:
                    proc.terminate()
                    proc.join()
                    return Result('cancelled', -1, '', '', '', -1)
            except Exception as err:
                if heartbeat_retry <= config.ACTIVITY_HEARTBEAT_MAX_RETRY:
                    heartbeat_retry += 1
                    continue
                else:
                    proc.terminate()
                    proc.join()
                    raise

        # Evaluate the result.
        result = queue.get_nowait()
        proc.join()
        return result
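The heartbeat loop above exits once the child sets event. The execute_action_proc helper is not shown in this example; a minimal sketch of what it plausibly does (run the action, queue the result, signal completion) would be:

def execute_action_proc(execute, action, event, queue):
    # Hypothetical child-side helper; the real implementation is not shown above.
    try:
        result = execute(action)  # run the actual activity
        queue.put(result)         # hand the result back to the parent
    finally:
        event.set()               # wake the parent's heartbeat loop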
Example #13
def test_sentinel():
    start_event = Event()
    stop_event = Event()
    stop_event.set()
    s = Sentinel(stop_event, start_event, list_key='sentinel_test:q')
    assert start_event.is_set()
    assert s.status() == Conf.STOPPED
Example #14
def test_sentinel():
    start_event = Event()
    stop_event = Event()
    stop_event.set()
    s = Sentinel(stop_event, start_event, broker=get_broker('sentinel_test:q'))
    assert start_event.is_set()
    assert s.status() == Conf.STOPPED
Example #15
class World (Process):
    """ A group of particles that can interact with each other """

    def __init__(self, plane, send, update_rate=60):
        Process.__init__(self)
        self.plane = plane
        self.send = send
        self.update_interval = 1/update_rate
        self.exit = Event()
        logger.info('Initialised')

    def run(self):
        logger.info('Running')
        previous_update = current_time()
        while not self.exit.is_set():
            # Update each particle
            for particle in self.plane:
                particle.update()

            # stuff and things
            self.send.send(self.plane)

            # Sleep to maintain update rate
            update_delay = self.update_interval - (current_time() - previous_update)
            previous_update = current_time()
            sleep(max(0, update_delay))

    def terminate(self):
        logger.info('Exiting')
        self.exit.set()
Example #16
class BroadcastClient(Process):
    def __init__(self, port, datagram_size, name="BroadcastClient"):
        Process.__init__(self, name=name)
        self.logger = multiprocessing.get_logger()
        self.event = Event()
        self.port = port
        self.datagram_size = datagram_size
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.settimeout(1)
        self.sock.bind(("", self.port))

    def run(self):
        self.event.set()
        self.logger.debug("PID: %d" % multiprocessing.current_process().pid)
        while self.event.is_set():
            try:
                message, (ip, port) = self.sock.recvfrom(self.datagram_size)
                teacher_discovered.send(sender=self)
                self.logger.debug("Received: %s from: %s" % (message, ip))
            except socket.timeout:
                self.logger.debug("%s timeout" % multiprocessing.current_process().name)
            time.sleep(1)

    def stop(self):
        self.logger.debug("Client will halt.")
        self.event.clear()
        self.sock.close()
        self.terminate()
Example #17
class Logger(object):
    def __init__(self, filename):
        self.qtag = Queue()
        self.done = Event()
        self.tag = None
        self.filename = filename
        self.file = None
    def start(self):
        self.file = open(self.filename, 'w')
        print('Opened', self.filename, 'for writing.')
    def set_tag(self, tag):
        self.qtag.put(tag)
    def set_done(self):
        self.done.set()
    def log(self, nodeid, msgid, data):
        if not self.qtag.empty():
            self.tag = self.qtag.get()
        if self.done.is_set():
            self.done.clear()
            return True
        L = ['%f' % time.time(), '%d' % nodeid, '%d' % msgid] + list(map(str, data))
        if self.tag:
            L.append(self.tag)
        print(','.join(L), file=self.file)
        self.file.flush()
    def close(self):
        if self.file:
            self.file.close()
            print('File closed.')
Example #18
    def face_proc(self, child_face_recog: Pipe, e_new_person: Event):
        """
        Parallel process of saving people for face recognition

        Arguments:
            child_face_recog {Pipe} -- pipe for communication with the parent process;
                receives ROI ndarray of the recognized object
            e_new_person {Event} -- set by the parent when a new person is detected,
                so subsequent images are saved into a fresh directory
        """
        if not os.path.exists('humans'):
            print('created', os.getcwd())
            os.mkdir('humans')
        else:
            print('exist')
        os.chdir(os.path.join(os.getcwd(), 'humans'))
        is_first = True
        counter = 0
        while True:
            if e_new_person.is_set():
                counter = 0
                if not is_first:
                    os.chdir('..')
                new_dir = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
                os.mkdir(new_dir)
                print('Created', os.getcwd() + new_dir)
                os.chdir(os.path.join(os.getcwd(), new_dir))
                e_new_person.clear()
                is_first = False
            image = child_face_recog.recv()
            cv.imwrite(filename=str(counter) + '.jpg', img=image)
            print('image saved:', os.getcwd() + str(counter) + '.jpg')
            counter += 1
Example #19
def test_recycle(r):
    # set up the Sentinel
    list_key = 'test_recycle_test:q'
    async('django_q.tests.tasks.multiply', 2, 2, list_key=list_key, redis=r)
    async('django_q.tests.tasks.multiply', 2, 2, list_key=list_key, redis=r)
    async('django_q.tests.tasks.multiply', 2, 2, list_key=list_key, redis=r)
    start_event = Event()
    stop_event = Event()
    # override settings
    Conf.RECYCLE = 2
    Conf.WORKERS = 1
    # set a timer to stop the Sentinel
    threading.Timer(3, stop_event.set).start()
    s = Sentinel(stop_event, start_event, list_key=list_key)
    assert start_event.is_set()
    assert s.status() == Conf.STOPPED
    assert s.reincarnations == 1
    async('django_q.tests.tasks.multiply', 2, 2, list_key=list_key, redis=r)
    async('django_q.tests.tasks.multiply', 2, 2, list_key=list_key, redis=r)
    task_queue = Queue()
    result_queue = Queue()
    # push two tasks
    pusher(task_queue, stop_event, list_key=list_key, r=r)
    pusher(task_queue, stop_event, list_key=list_key, r=r)
    # worker should exit on recycle
    worker(task_queue, result_queue, Value('f', -1))
    # check if the work has been done
    assert result_queue.qsize() == 2
    # save_limit test
    Conf.SAVE_LIMIT = 1
    result_queue.put('STOP')
    # run monitor
    monitor(result_queue)
    assert Success.objects.count() == Conf.SAVE_LIMIT
    r.delete(list_key)
Example #20
class DeviceServer(ThreadedTCPServer, Process):
	
	#causes handle_request to return
	timeout = 1
	
	def __init__(self, mux, muxdevice, server_address, RequestHandlerClass):
		Process.__init__(self)
		ThreadedTCPServer.__init__(self, server_address, RequestHandlerClass)
		self.mux = mux
		self.muxdev = muxdevice
		self._stop = Event()

	def stop(self):
		self._stop.set()
		
	def stopped(self):
		return self._stop.is_set()

	def run(self):
		if self.stopped():
			_LOGGER.warning("Thread already stopped")
		
		while not self.stopped():
			self.handle_request()
		self.socket.close()
		_LOGGER.debug("%s will exit now" % (str(self)))
Example #21
class AMQPInput(BaseInput):

	log = logging.getLogger("%s.AMQPInput" %(__name__))

	def __init__(self, aggr_config, namespace="local", config={}):
		super(AMQPInput, self).__init__(aggr_config, namespace, config)
		self.exit = Event()

	def run(self):
		self.log.info("Connecting to (%s): %s" %(self.outputType, self.outputUri))
		aggrConn = self.connectAggregator()

		rabbitmqConsumer = RabbitMQConsumer(**self.config)

		def callback(event):
			if 'namespace' not in event:
				event['namespace'] = self.namespace

			aggrConn.send(event)

			if self.exit.is_set():
				aggrConn.close()
				rabbitmqConsumer.stop()

		rabbitmqConsumer.userCallbacks = [ callback ]	
		rabbitmqConsumer.start()

	def shutdown(self):
		self.exit.set()
Example #22
def run_server_until_file_change(serve_forever_path, verbosity=0):
    files_queue = Queue()
    file_changed = Event()

    worker = Worker(serve_forever_path, files_queue, os.getpid())
    worker.daemon = True

    monitor = Monitor(lambda: file_changed.set())
    monitor.start()
    worker.start()

    print("started server with PID %s" % worker.pid)

    while (not file_changed.is_set()) and worker.is_alive():
        try:
            path = files_queue.get(timeout=1)
        except queue.Empty:
            pass
        else:
            if path.endswith('testapp.py'):
                print("adding %s to paths" % path)
            monitor.add_path(path)

    monitor.stop()
    print("Waiting for monitor thread in %s to quit" % worker.pid)
    monitor.join()
    print("Waiting for server %s to quit" % worker.pid)
    worker.terminate()
    worker.join()
    print("server with PID %s done" % worker.pid)
Example #23
class DataProcess(Process):
    def __init__(self, data_pipeline, **get_batch_kwargs):
        super(DataProcess, self).__init__(name='neuralnilm-data-process')
        self._stop = Event()
        self._queue = Queue(maxsize=3)
        self.data_pipeline = data_pipeline
        self._get_batch_kwargs = get_batch_kwargs

    def run(self):
        batch = self.data_pipeline.get_batch(**self._get_batch_kwargs)
        while not self._stop.is_set():
            try:
                self._queue.put(batch)
            except AssertionError:
                # queue is closed
                break
            batch = self.data_pipeline.get_batch(**self._get_batch_kwargs)

    def get_batch(self, timeout=30):
        if self.is_alive():
            return self._queue.get(timeout=timeout)
        else:
            raise RuntimeError("Process is not running!")

    def stop(self):
        self._stop.set()
        self._queue.close()
        self.terminate()
        self.join()
Example #24
class CaptureProcess(Process):
    """A process that fills a queue with images as captured from 
    a camera feed"""
    def __init__(self, capture, imageQueue):
        Process.__init__(self, name="Capture")
        self.imageQueue = imageQueue
        self.capture = capture
        self.keepGoing = Event()
        self.keepGoing.set()
        self.daemon = True

    def run(self):
        print "CaptureProcess pid: %s" % (self.pid,)
        while self.keepGoing.is_set():
            image = captureImage(self.capture)
#            sys.stdout.write(".")
            try:
                self.imageQueue.put(serializeImage(image), block=True, timeout=0.25)
            except FullException:
                try:
                    _ = self.imageQueue.get_nowait()
                except:
                    pass  # Try to clear the queue, but don't worry if someone snatches it first
    def stop(self):
        self.keepGoing.clear()
Example #25
def do_the_transfer(source, target, cmd, parameters, filename, size, runname, clean=False, logfile=None):

    logger.debug("starting transfer")

    if ("gsiftp" in source) or ("gsiftp" in target):
        if not cmd:
            cmd = "globus-url-copy"

        transfer = Gridftp(source+filename, target+filename, cmd, parameters)
    else:
        if not cmd:
            cmd = 'scp'
        transfer = Scp(source+filename, target+filename, cmd, parameters)

    results_sizes[runname] = size
    results_transfer[runname] = transfer

    file_temp = get_source_file(size, working_directory, clean)

    transfer.prepare(file_temp, clean)

    #logger.info("Starting transfer:\n\t"+str(gridftp))

    transfer_cmd = transfer.transfer_command()

    event = Event()
    e = threading.Thread(target=execution, args=(event, transfer))
    t = threading.Thread(target=timing, args=(event, size, results_time, results_speed, runname, transfer, logfile))
    t.start()
    e.start()
    while not event.is_set():
        time.sleep(1)
Example #26
class BroadcastServer(Process):
    def __init__(self, ip, port, message, name="BroadcastServer"):
        Process.__init__(self, name=name)
        self.logger = multiprocessing.get_logger()
        self.event = Event()
        self.message = message
        self.ip = ip
        self.port = port
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind(("", 0))

    def run(self):
        self.event.set()
        self.logger.debug("PID: %d" % multiprocessing.current_process().pid)
        while self.event.is_set():
            self.logger.debug("Sending: %s" % self.message)
            self.sock.sendto(self.message, (self.ip, self.port))
            time.sleep(1)

    def stop(self):
        self.logger.debug("Server will halt.")
        self.event.clear()
        self.terminate()
Example #27
class StoppableProcess(Process):
    """ Base class for Processes which require the ability
    to be stopped by a process-safe method call
    """

    def __init__(self):
        self._should_stop = Event()
        self._should_stop.clear()
        super(StoppableProcess, self).__init__()

    def join(self, timeout=0):
        """ Joins the current process and forces it to stop after
        the timeout if necessary

        :param timeout: Timeout duration in seconds
        """
        self._should_stop.wait(timeout)
        if not self.should_stop():
            self.stop()
        super(StoppableProcess, self).join(0)

    def stop(self):
        self._should_stop.set()

    def should_stop(self):
        return self._should_stop.is_set()

    def __repr__(self):
        return "<%s(should_stop=%s)>" % (
            self.__class__.__name__, self.should_stop())
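A subclass is expected to poll should_stop() inside its run() loop. A minimal, hypothetical usage sketch (Ticker is illustrative only, not part of the original code):

import time

class Ticker(StoppableProcess):
    def run(self):
        # Do one unit of work, then re-check the shared stop flag.
        while not self.should_stop():
            print("tick")
            time.sleep(0.5)

if __name__ == '__main__':
    t = Ticker()
    t.start()
    time.sleep(2)
    t.stop()           # process-safe: sets the shared Event
    t.join(timeout=1)  # overridden join(): waits on the flag, then joins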
Example #28
File: ng.py  Project: ArturFis/grab
def start_spider(spider_cls):
    try:
        result_queue = Queue()
        network_response_queue = Queue()
        shutdown_event = Event()
        generator_done_event = Event()
        taskq = QueueBackend('ng')

        #from grab.spider.base import logger_verbose
        #logger_verbose.setLevel(logging.DEBUG)

        kwargs = {
            'taskq': taskq,
            'result_queue': result_queue,
            'network_response_queue': network_response_queue,
            'shutdown_event': shutdown_event,
            'generator_done_event': generator_done_event,
            'ng': True,
        }

        # Generator: OK
        generator_waiting_shutdown_event = Event()
        bot = spider_cls(waiting_shutdown_event=generator_waiting_shutdown_event, **kwargs)
        generator = Process(target=bot.run_generator)
        generator.start()

        # Downloader: OK
        downloader_waiting_shutdown_event = Event()
        bot = spider_cls(waiting_shutdown_event=downloader_waiting_shutdown_event,
                         **kwargs)
        downloader = Process(target=bot.run)
        downloader.start()

        # Parser: OK
        events = []
        for x in range(2):
            parser_waiting_shutdown_event = Event()
            events.append(parser_waiting_shutdown_event)
            bot = spider_cls(waiting_shutdown_event=parser_waiting_shutdown_event,
                             **kwargs)
            parser = Process(target=bot.run_parser)
            parser.start()

        while True:
            time.sleep(2)
            print('task size', taskq.size())
            print('response size', network_response_queue.qsize())
            if (downloader_waiting_shutdown_event.is_set() and
                all(x.is_set() for x in events)):
                shutdown_event.set()
                break

        time.sleep(1)

        print('done')
    finally:
        for child in active_children():
            logging.debug('Killing child process (pid=%d)' % child.pid)
            child.terminate()
Example #29
    def _start_async_server(self):
        self.server = LiveSyncSocketServer(port=self.liveport)
        server_started_event = Event()
        self.server_process = Process(target=self.server.start, args=(server_started_event,))
        self.server_process.daemon = True
        self.server_process.start()
        server_started_event.wait(timeout=0.1)
        return server_started_event.is_set()
Example #30
class Connector(object):
    def __init__(self, reply_generator: ConnectorReplyGenerator, connectors_event: Event):
        self._reply_generator = reply_generator
        self._scheduler = None
        self._thread = Thread(target=self.run)
        self._write_queue = Queue()
        self._read_queue = Queue()
        self._frontends_event = connectors_event
        self._shutdown_event = Event()
        self._muted = True

    def give_nlp(self, nlp):
        self._reply_generator.give_nlp(nlp)

    def start(self):
        self._scheduler.start()
        self._thread.start()

    def run(self):
        while not self._shutdown_event.is_set():
            message = self._scheduler.recv(timeout=0.2)
            if self._muted:
                self._scheduler.send(None)
            elif message is not None:
                # Receive the message and put it in a queue
                self._read_queue.put(message)
                # Notify main program to wakeup and check for messages
                self._frontends_event.set()
                # Send the reply
                reply = self._write_queue.get()
                self._scheduler.send(reply)

    def send(self, message: str):
        self._write_queue.put(message)

    def recv(self) -> Optional[ConnectorRecvMessage]:
        if not self._read_queue.empty():
            return self._read_queue.get()
        return None

    def shutdown(self):
        # Shutdown event signals both our thread and process to shutdown
        self._shutdown_event.set()
        self._scheduler.shutdown()
        self._thread.join()

    def generate(self, message: str, doc: Doc=None) -> str:
        return self._reply_generator.generate(message, doc)

    def mute(self):
        self._muted = True

    def unmute(self):
        self._muted = False

    def empty(self):
        return self._read_queue.empty()
Example #31
class MainProcess(Process):
    ## Constructor
    #
    # Inits the process, the CSV file, the PCA9547 Multiplexor, the settings
    # file for each sensor (with calibration)
    # @param self The object pointer
    def __init__(self):
        Process.__init__(self)
        self.exit = Event()

        self.csv = CSVExport()
        self.i2cMux = PCA9547()

        self.settings0 = RTIMU.Settings(SETTINGS_FILE_0)
        self.settings1 = RTIMU.Settings(SETTINGS_FILE_1)
        self.settings2 = RTIMU.Settings(SETTINGS_FILE_2)
        self.settings3 = RTIMU.Settings(SETTINGS_FILE_3)
        self.settings4 = RTIMU.Settings(SETTINGS_FILE_4)
        self.settings5 = RTIMU.Settings(SETTINGS_FILE_5)

        self.imu0 = RTIMU.RTIMU(self.settings0)
        self.imu1 = RTIMU.RTIMU(self.settings1)
        self.imu2 = RTIMU.RTIMU(self.settings2)
        self.imu3 = RTIMU.RTIMU(self.settings3)
        self.imu4 = RTIMU.RTIMU(self.settings4)
        self.imu5 = RTIMU.RTIMU(self.settings5)

        self.settings = [
            self.settings0, self.settings1, self.settings2, self.settings3,
            self.settings4, self.settings5
        ]
        self.imus = [
            self.imu0, self.imu1, self.imu2, self.imu3, self.imu4, self.imu5
        ]

        self.detectedIMU = [False] * 6
        self.detectImu()

        self.pollInterval = 2

## Detects the number of IMUs available on each channel
#
# @param self The object pointer
# @return number of detected IMUs

    def detectImu(self):
        try:
            for n, imu in enumerate(self.imus):
                self.i2cMux.setChannel(n)
                if imu.IMUInit():
                    self.detectedIMU[n] = True
                else:
                    self.detectedIMU[n] = False
        except:
            print("Error in detection")
        return self.detectedIMU

    def setPollInterval(self, pollInterval):
        self.pollInterval = pollInterval

## Sets file name for the CSV file
#
# @param self The object pointer
# @param txt file name

    def setFileName(self, txt):
        self.csv.setTitle(txt)

## gets the current data from the current sensor
#
# @param self The object pointer
# @return all sensor data as a vector

    def getData(self):
        return self.allData

## stops the thread and signals to exit
#
# @param self The object pointer

    def stop(self):
        self.exit.set()

    def saveSettings(self):
        try:
            for s in self.settings:
                s.save()
        except:
            print("Error saving settings")
        return self.detectedIMU

## Thread loop
#
# Gets data at the defined polling interval, and stores
# data from all the available sensors with their identifier and timestamp
# @param self The object pointer
# @return finalized job

    def run(self):
        self.csv.createFile()
        init_time = time.time()
        while not self.exit.is_set():
            try:
                for n, imu in enumerate(self.imus):
                    if self.detectedIMU[n]:
                        self.i2cMux.setChannel(n)
                        if imu.IMURead():
                            data0 = imu.getIMUData()
                            quaternion = data0["fusionQPose"]
                            acceleration = data0["accel"]
                            gyro = data0["gyro"]
                            compass = data0["compass"]

                            nowTime = time.time() - init_time

                            self.allData = [n] + \
                                      [nowTime] + \
                                      [acceleration[0]] + \
                                      [acceleration[1]] + \
                                      [acceleration[2]] + \
                                      [gyro[0]] + \
                                      [gyro[1]] + \
                                      [gyro[2]] + \
                                      [compass[0]] + \
                                      [compass[1]] + \
                                      [compass[2]] + \
                                      [quaternion[0]] + \
                                      [quaternion[1]] + \
                                      [quaternion[2]] + \
                                      [quaternion[3]]

                            self.csv.csvWrite(self.allData)
                            #print(self.allData)
                            time.sleep(self.pollInterval * 1.0 / 1000.0)
            except (KeyboardInterrupt, SystemExit):
                print("Finishing thread (interrupted)")
                self.exit.set()
        print("Finished Acquisition")
        return
Example #32
class Cluster:
    def __init__(self, broker: Broker = None):
        self.broker = broker or get_broker()
        self.sentinel = None
        self.stop_event = None
        self.start_event = None
        self.pid = current_process().pid
        self.cluster_id = uuid.uuid4()
        self.host = socket.gethostname()
        self.timeout = Conf.TIMEOUT
        signal.signal(signal.SIGTERM, self.sig_handler)
        signal.signal(signal.SIGINT, self.sig_handler)

    def start(self) -> int:
        # Start Sentinel
        self.stop_event = Event()
        self.start_event = Event()
        self.sentinel = Process(
            target=Sentinel,
            args=(
                self.stop_event,
                self.start_event,
                self.cluster_id,
                self.broker,
                self.timeout,
            ),
        )
        self.sentinel.start()
        logger.info(_(f"Q Cluster {self.name} starting."))
        while not self.start_event.is_set():
            sleep(0.1)
        return self.pid

    def stop(self) -> bool:
        if not self.sentinel.is_alive():
            return False
        logger.info(_(f"Q Cluster {self.name} stopping."))
        self.stop_event.set()
        self.sentinel.join()
        logger.info(_(f"Q Cluster {self.name} has stopped."))
        self.start_event = None
        self.stop_event = None
        return True

    def sig_handler(self, signum, frame):
        logger.debug(
            _(f'{current_process().name} got signal {Conf.SIGNAL_NAMES.get(signum, "UNKNOWN")}')
        )
        self.stop()

    @property
    def stat(self) -> Status:
        if self.sentinel:
            return Stat.get(pid=self.pid, cluster_id=self.cluster_id)
        return Status(pid=self.pid, cluster_id=self.cluster_id)

    @property
    def name(self) -> str:
        return humanize(self.cluster_id.hex)

    @property
    def is_starting(self) -> bool:
        return self.stop_event and self.start_event and not self.start_event.is_set()

    @property
    def is_running(self) -> bool:
        return self.stop_event and self.start_event and self.start_event.is_set()

    @property
    def is_stopping(self) -> bool:
        return (self.stop_event and self.start_event
                and self.start_event.is_set() and self.stop_event.is_set())

    @property
    def has_stopped(self) -> bool:
        return self.start_event is None and self.stop_event is None and self.sentinel
Example #33
class Thread():
    """ Leverages multiprocessing(Process, Event)
        Wraps a provided function in a thread that can run independently until stopped
        Functions provided should be designed to run a finite number of times so that
          a loop doesn't cause the function to never release control to the thread.
        Setting executions to anything other than exactly int() > 0 will cause the thread
          to run until stopped.

        For functions that modify class attributes, the attribute must be set up to use
        a shared memory variable, otherwise the attribute change won't persist the thread.

        See camera.Camera().capture() for an example
    """
    def __init__(self,
                 target,
                 *args,
                 executions: int = 0,
                 start=False,
                 **kwargs):
        self.inputs = locals()  # For posterity
        self._target = target
        self._args = args
        self._kwargs = kwargs
        self._stop_event = Event()
        self.executions = executions if isinstance(executions, int) else 0
        self.results = list()  # So results are retrievable
        self._setup()
        if start:
            self.start()

    def _setup(self):
        # Spawns a process
        self.process = Process(target=self._target,
                               args=self._args,
                               kwargs=self._kwargs)
        # override the default Process().run() method with Thread()._run()
        self.process.run = self._run

    def start(self):
        # Start the process in this thread
        self.process.start()

    def stop(self, *args, **kwargs):
        # Shortcut for stop_gracefully()
        return self.stop_gracefully(*args, **kwargs)

    def stop_gracefully(self, *args, **kwargs):
        """ Allow the thread to come to the end of an iteration, and stop.
        """
        self._stop_event.set()
        self.process.join(*args, **kwargs)
        while self.is_alive():
            time.sleep(0.001)  # check every millisecond
        self.process.close()
        return True

    def stop_immediately(self, *args, **kwargs):
        """ Send a SIGKILL to the thread, halting it without allowing the thread
            to come to the end of an iteration.
        """
        self._stop_event.set()
        self.process.kill(*args, **kwargs)
        while self.is_alive():
            time.sleep(0.001)  # check every millisecond
        self.process.close()
        return True

    def restart(self):
        """ Restart the thread if stopped, or stop_immediately, reset, and start()
        """
        if not self._stop_event.is_set():
            self.stop_immediately()
        self._stop_event.clear()
        self._setup()
        self.start()

    def is_alive(self):
        return self.process.is_alive()

    def _run(self):
        """ This method is used to replace the default Process().run() method,
              which is executed on Process().start()
        """
        if self.executions > 0:
            # Run n times
            remaining = self.executions
            while remaining > 0:
                self.results.append(self._target(*self._args, **self._kwargs))
                remaining -= 1
            # Cleanup
            self.process.close()
        else:
            # Run until stopped
            while not self._stop_event.is_set():
                self.results.append(self._target(*self._args, **self._kwargs))
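A brief, hypothetical usage sketch of the wrapper above; poll() is illustrative only, and (per the docstring) anything it appends to results stays in the worker process unless shared memory is used:

import time

def poll():
    # One finite unit of work per call, so the wrapper regains control
    # between iterations and can honour the stop event.
    print("polling...")
    time.sleep(0.5)

if __name__ == '__main__':
    t = Thread(poll)   # executions=0 -> run until stopped
    t.start()
    time.sleep(2)
    t.stop()           # stop_gracefully(): lets the current iteration finish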
Example #34
        '''
        if self.con.acquire():
            while self.last_cnt >=  self.counter:
                self.con.wait()
            index = self.last_cnt
            self.last_cnt += 1
            self.con.release()
            return self.container[index]
        '''
    def close(self):
        self.socket.close()
lock = threading.Lock()  # mutex
con = threading.Condition()  # the two servers' data is read in turn, so a mutex is no longer needed
event = Event()
print("started receiving sensor data?", end=": ")
print(event.is_set())
mythread1 = myThread(host1,port1,con,event)
mythread1.start()
event.set()
print(" is start receive sensor data ? ",end = ":")
def showData(data):
    for item in data:
        print(np.array(item))
    print("================")

i = 0 
thresh = 40
def saveImageData(sensor1,path,avgtemp):
    np.save(path+"/imagedata.npy",np.array(sensor1))
    np.save(path+"/avgtemp.npy",avgtemp)
i = 0 
Example #35
class WalletRPCManager(ProcessManager):
    def __init__(self,
                 resources_path,
                 wallet_file_path,
                 wallet_password,
                 app,
                 log_level=1,
                 enable_ssl=False):
        self.user_agent = str(uuid4().hex)
        enable_ssl = False
        wallet_log_path = os.path.join(os.path.dirname(wallet_file_path),
                                       "ryo-wallet-rpc.log")

        log_level = 2

        wallet_rpc_args = u'%s/bin/ryo-wallet-rpc --disable-rpc-login --prompt-for-password --daemon-address %s --wallet-file %s --log-file %s --rpc-bind-port %d --log-level %d' \
                                            % (resources_path, REMOTE_DAEMON_ADDRESS, wallet_file_path, wallet_log_path, WALLET_RPC_PORT, log_level)

        print(wallet_rpc_args)
        ProcessManager.__init__(self, wallet_rpc_args, "ryo-wallet-rpc")
        sleep(0.2)
        self.send_command(wallet_password)

        self.rpc_request = WalletRPCRequest(app, self.user_agent, enable_ssl)
        #         self.rpc_request.start()
        self._stopped = False
        self._ready = Event()
        self.block_height = 0
        self.is_password_invalid = Event()
        self.last_log_lines = []
        self.last_error = ""

    def run(self):
        rpc_ready_strs = [
            "Binding on 127.0.0.1:%d" % WALLET_RPC_PORT,
            "Starting wallet RPC server", "Run net_service loop",
            "Refresh done", "RPC server ready"
        ]
        err_str = "ERROR"
        invalid_password_str = "invalid password"
        height_regex = re.compile(
            r"Processed block: \<([a-z0-9]+)\>, height (\d+)")
        height_regex2 = re.compile(r"Skipped block by height: (\d+)")
        height_regex3 = re.compile(
            r"Skipped block by timestamp, height: (\d+)")

        for line in iter(self.proc.stdout.readline, b''):
            if self._stopped: break

            m_height = height_regex.search(line)
            if m_height: self.block_height = m_height.group(2)
            if not m_height:
                m_height = height_regex2.search(line)
                if m_height: self.block_height = m_height.group(1)
            if not m_height:
                m_height = height_regex3.search(line)
                if m_height: self.block_height = m_height.group(1)

            if not self._ready.is_set() and any(s in line
                                                for s in rpc_ready_strs):
                self._ready.set()
                log("RPC server ready!", LEVEL_INFO, self.proc_name)

            if err_str in line:
                self.last_error = line.rstrip()
                if not self.is_password_invalid.is_set() and invalid_password_str in line:
                    self.is_password_invalid.set()
                    log("ERROR: Invalid wallet password", LEVEL_ERROR,
                        self.proc_name)
                else:
                    log(self.last_error, LEVEL_ERROR, self.proc_name)
            elif m_height:
                log(line.rstrip(), LEVEL_INFO, self.proc_name)
            else:
                log(line.rstrip(), LEVEL_DEBUG, self.proc_name)

            if len(self.last_log_lines) > 1:
                self.last_log_lines.pop(0)
            self.last_log_lines.append(line[:120])

        if not self.proc.stdout.closed:
            self.proc.stdout.close()

    def is_ready(self):
        return self._ready.is_set()

    def is_invalid_password(self):
        return self.is_password_invalid.is_set()

    def stop(self, force=False):
        if not force: self.rpc_request.stop_wallet()
        if self.is_proc_running():
            counter = 0
            while True:
                if self.is_proc_running():
                    if counter < 60:
                        sleep(1)
                        counter += 1
                    else:
                        self.proc.kill()
                        log("[%s] killed" % self.proc_name, LEVEL_INFO,
                            self.proc_name)
                        break
                else:
                    break

        self._stopped = True
        self._ready = Event()
        self.block_height = 0
        self.is_password_invalid = Event()
        self.last_log_lines = []
        self.last_error = ""

        log("[%s] stopped" % self.proc_name, LEVEL_INFO, self.proc_name)
Example #36
class DataProcess(Process):
    def __init__(self, data_queue, data_paths, repeat=True):
        '''
        data_queue : Multiprocessing queue
        data_paths : list of data and label pair used to load data
        repeat : if set True, return data until exit is set
        '''
        super(DataProcess, self).__init__()
        # Queue to transfer the loaded mini batches
        self.data_queue = data_queue
        self.data_paths = data_paths
        self.num_data = len(data_paths)
        self.repeat = repeat

        # Tuple of data shape
        self.batch_size = cfg.CONST.BATCH_SIZE
        self.exit = Event()
        self.shuffle_db_inds()

    def shuffle_db_inds(self):
        # Randomly permute the training roidb
        if self.repeat:
            self.perm = np.random.permutation(np.arange(self.num_data))
        else:
            self.perm = np.arange(self.num_data)
        self.cur = 0

    def get_next_minibatch(self):
        if (self.cur + self.batch_size) >= self.num_data and self.repeat:
            self.shuffle_db_inds()

        db_inds = self.perm[self.cur:min(self.cur +
                                         self.batch_size, self.num_data)]
        self.cur += self.batch_size
        return db_inds

    def shutdown(self):
        self.exit.set()

    @print_error
    def run(self):
        iteration = 0
        # Run the loop until exit flag is set
        while not self.exit.is_set() and self.cur <= self.num_data:
            # Ensure that the network sees (almost) all data per epoch
            db_inds = self.get_next_minibatch()

            data_list = []
            label_list = []
            for batch_id, db_ind in enumerate(db_inds):
                datum = self.load_datum(self.data_paths[db_ind])
                label = self.load_label(self.data_paths[db_ind])

                data_list.append(datum)
                label_list.append(label)

            batch_data = np.array(data_list).astype(np.float32)
            batch_label = np.array(label_list).astype(np.float32)

            # The following will wait until the queue frees
            self.data_queue.put((batch_data, batch_label), block=True)
            iteration += 1

    def load_datum(self, path):
        pass

    def load_label(self, path):
        pass
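load_datum() and load_label() are left as stubs above; a minimal, hypothetical subclass might load .npy pairs like this (the (data, label) path layout is an assumption):

import numpy as np

class NpyDataProcess(DataProcess):
    # Illustrative subclass; real loaders depend on the dataset format.
    def load_datum(self, paths):
        data_path, _ = paths       # each entry is assumed to be a (data, label) pair
        return np.load(data_path)

    def load_label(self, paths):
        _, label_path = paths
        return np.load(label_path)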
Example #37
class ClockBaseInterruptBehavior(ClockBaseBehavior):
    '''A kivy clock which can be interrupted during a frame to execute events.
    '''

    interupt_next_only = False
    _event = None
    _get_min_timeout_func = None

    def __init__(self, interupt_next_only=False, **kwargs):
        super(ClockBaseInterruptBehavior, self).__init__(**kwargs)
        self._event = MultiprocessingEvent() if PY2 else ThreadingEvent()
        self.interupt_next_only = interupt_next_only
        self._get_min_timeout_func = self.get_min_timeout

    def usleep(self, microseconds):
        self._event.clear()
        self._event.wait(microseconds / 1000000.)

    def on_schedule(self, event):
        fps = self._max_fps
        if not fps:
            return

        if not event.timeout or (
                not self.interupt_next_only
                and event.timeout <= 1 / fps -  # remaining time
            (self.time() - self._last_tick) +  # elapsed time
                4 / 5. * self.get_resolution()):  # resolution fudge factor
            self._event.set()

    def idle(self):
        fps = self._max_fps
        event = self._event
        resolution = self.get_resolution()
        if fps > 0:
            done, sleeptime = self._check_ready(fps, resolution,
                                                4 / 5. * resolution)
            if not done:
                event.wait(sleeptime)

        current = self.time()
        self._dt = current - self._last_tick
        self._last_tick = current
        event.clear()
        # anything scheduled from now on, if scheduled for the upcoming frame
        # will cause a timeout of the event on the next idle due to on_schedule
        # `self._last_tick = current` must happen before clear, otherwise the
        # on_schedule computation is wrong when exec between the clear and
        # the `self._last_tick = current` bytecode.
        return current

    def _check_ready(self, fps, min_sleep, undershoot):
        if self._event.is_set():
            return True, 0

        t = self._get_min_timeout_func()
        if not t:
            return True, 0

        if not self.interupt_next_only:
            curr_t = self.time()
            sleeptime = min(1 / fps - (curr_t - self._last_tick), t - curr_t)
        else:
            sleeptime = 1 / fps - (self.time() - self._last_tick)
        return sleeptime - undershoot <= min_sleep, sleeptime - undershoot
Example #38
class MyApp(QMainWindow):
    def __init__(self):
        super(MyApp, self).__init__()
        self.ui = Ui_MainWindow()
        self.ui.closeEvent = self.closeEvent
        self.ui.setupUi(self)

        self.botinstances = []
        self.cookies = []
        self.newrow = []
        self.killevents = []
        self.count = 0
        self.ui.addRow.clicked.connect(self.AddRow)
        self.ui.verify.clicked.connect(self.verification)
        self.ui.runAll.hide()
        self.ui.runAll.clicked.connect(self.runall)
        self.ui.actionOpen.setShortcut("Ctrl+R")
        self.ui.actionOpen.triggered.connect(self.open)
        self.ui.actionOpen.setShortcut("Ctrl+O")
        self.ui.actionSave.triggered.connect(self.save)
        self.ui.actionSave.setShortcut("Ctrl+S")

        # Start cookie listening
        self.qcookies = Queue()
        self.endcookies = Event()
        self.cookieplacer = threading.Thread(target=self.CookieListener)
        self.cookieplacer.start()

    def save(self):
        Qname = QFileDialog.getSaveFileName(self, 'Save file', '',
                                            "HolyCopBot Files (*.hcb)")
        if Qname:
            name = Qname[0]
            linklist = []
            proxylist = []
            sizelist = []
            for i in range(self.count):
                linklist.append(self.newrow[i].urlInput.text())
                proxylist.append(self.newrow[i].proxy.text() + ":" +
                                 self.newrow[i].proxyport.text())
                sizelist.append(str(self.newrow[i].sizeBox.currentText()))
            hcbexport.all(name, linklist, proxylist, sizelist)

    def open(self):
        Qname = QFileDialog.getOpenFileName(self, 'Open file', '',
                                            "HolyCopBot Files (*.hcb)")[0]
        if Qname:
            name = Qname
            filecontents = hcbimport.all(name)
            linklist = filecontents[0]
            proxylist = filecontents[1]
            proxyportlist = filecontents[2]
            sizelist = filecontents[3]
            for i in range(len(linklist)):
                newest = self.count
                self.AddRow()
                self.newrow[newest].urlInput.setText(linklist[i])
                self.newrow[newest].proxy.setText(proxylist[i])
                self.newrow[newest].proxyport.setText(proxyportlist[i])
                index = self.newrow[newest].sizeBox.findText(
                    sizelist[i], QtCore.Qt.MatchFixedString)
                if index >= 0:
                    self.newrow[newest].sizeBox.setCurrentIndex(index)

    def verification(self):
        # api verification check
        key = self.ui.verifyKey.text()

        verified = apicall.verify(key)
        if verified:
            self.ui.verify.hide()
            self.ui.verifyKey.hide()
            self.ui.verifyLabel.hide()
            self.ui.runAll.show()
            self.ui.addRow.setEnabled(True)
            self.ui.actionOpen.setEnabled(True)
            self.ui.actionSave.setEnabled(True)
        else:
            reply = QMessageBox.question(
                self, 'Not Registered', "Your verification key was incorrect",
                QMessageBox.Ok)
            if reply == QMessageBox.Ok:
                self.close()

    def runall(self):
        if self.count == 0:
            return
        else:
            self.ui.runAll.setEnabled(False)
            for i in range(self.count):
                self.RunBot(i)

    def AddRow(self):
        #limit of concurrent bots
        if self.count > 20:
            return

        self.newrow.append(RowWidget(self.ui, self.count))
        somenum = int(self.newrow[self.count].countLabel.text())
        self.ui.verticalLayout_2.addWidget(self.newrow[self.count].widget)
        self.newrow[self.count].run.clicked.connect(
            lambda: self.RunBot(somenum))
        self.newrow[self.count].openCart.clicked.connect(
            lambda: self.OpenCart(somenum))
        self.newrow[self.count].stop.clicked.connect(
            lambda: self.StopProcess(somenum))
        self.count += 1

    def RunBot(self, somenum):
        link = self.newrow[somenum].urlInput.text()
        if link == "":
            self.newrow[somenum].invalidUrl.setText("Invalid Url!")
        else:
            self.newrow[somenum].invalidUrl.setText("")
            # get and validate proxy if exists
            self.proxy = self.newrow[somenum].proxy.text(
            ) + ":" + self.newrow[somenum].proxyport.text()
            ipcheck = validateIP.check(self.proxy)
            if ipcheck == False or self.proxy == '':
                self.proxy = 0

            rawsize = str(self.newrow[somenum].sizeBox.currentText())
            if rawsize == "Size":
                self.newrow[somenum].invalidUrl.setText("Invalid Size!")
            if not rawsize == "Size":
                size = rawsize.replace(".", "")
                self.newrow[somenum].run.setText("Running")
                self.newrow[somenum].stop.setEnabled(True)
                self.newrow[somenum].run.setEnabled(False)
                self.newrow[somenum].sizeBox.setEnabled(False)

                imagepreview = threading.Thread(target=self.GetPreviewImage,
                                                args=(link, somenum))
                imagepreview.start()

                self.BotInstance(link, size, somenum)

    def BotInstance(self, link, size, row):
        kill = Event()
        self.killevents.insert(row, kill)
        botprocess = Process(target=launchbot.runbot,
                             args=(link, size, self.proxy, self.qcookies, row,
                                   self.killevents[row]))
        botprocess.start()
        self.botinstances.insert(row, botprocess)

    def CookieListener(self):
        while not self.endcookies.is_set():
            if self.qcookies.qsize() != 0:
                self.ui.runAll.setEnabled(True)
                result = self.qcookies.get()
                placeinrow = result[0]
                # error handling
                if result[1] == "oos":
                    error = "Size Out of Stock!"
                    self.newrow[placeinrow].invalidUrl.setText(error)
                elif result[1] == 'forbidden':
                    error = "IP Ban! (temporary)"
                    self.newrow[placeinrow].invalidUrl.setText(error)
                else:
                    newcookies = result[1]
                    self.cookies.insert(placeinrow, newcookies)
                    self.newrow[placeinrow].run.setText("Copped!")
                self.newrow[placeinrow].openCart.setEnabled(True)
                self.newrow[placeinrow].stop.setEnabled(False)

    def OpenCart(self, somenum):
        self.newrow[somenum].run.setText("Run")
        self.newrow[somenum].run.setEnabled(True)
        self.newrow[somenum].sizeBox.setEnabled(True)

        link = self.newrow[somenum].urlInput.text()
        global site
        if "footlocker" in link:
            site = "https://www.footlocker.com/"
        if "footaction" in link:
            site = "https://www.footaction.com/"
        if "eastbay" in link:
            site = "https://www.eastbay.com/"
        if "champssports" in link:
            site = "https://www.champssports.com/"
        cartbrowser = Process(target=launchcart.opencart,
                              args=(site, self.cookies[somenum], self.proxy))
        cartbrowser.start()

    def GetPreviewImage(self, link, rownum):
        img = getpreview.req(link)
        # set pixmap qt object to image returned
        pixmap = QPixmap()
        pixmap.loadFromData(img)
        self.newrow[rownum].previewImage.setPixmap(pixmap)

    def StopProcess(self, rownum):
        # set event for botinstance to kill, then wait for event to be unset, reset gui
        killevent = self.killevents[rownum]
        killevent.set()
        while killevent.is_set():
            time.sleep(.1)
        self.botinstances[rownum].terminate()
        self.newrow[rownum].run.setText("Run")
        self.newrow[rownum].stop.setEnabled(False)
        self.newrow[rownum].run.setEnabled(True)
        self.newrow[rownum].sizeBox.setEnabled(True)

    def closeEvent(self, event):
        if self.count == 0:
            self.endcookies.set()
            sys.exit()
        reply = QMessageBox.question(self, 'Confirm Exit',
                                     "Are you sure you want to quit?",
                                     QMessageBox.Yes | QMessageBox.No,
                                     QMessageBox.No)
        if reply == QMessageBox.Yes:
            self.endcookies.set()
            time.sleep(0.15)
            event.accept()
        else:
            event.ignore()
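The StopProcess method above relies on a cooperative shutdown handshake: the GUI sets a kill Event, the bot process is expected to clear that event once it has cleaned up, and only then does the GUI terminate the process. A minimal stdlib-only sketch of that handshake follows; the worker body and timings are illustrative, not the bot's actual code.

from multiprocessing import Process, Event
import time

def worker(kill_event):
    # Illustrative worker: poll the kill event, do cleanup, then clear the
    # event to acknowledge that shutdown is complete.
    while not kill_event.is_set():
        time.sleep(0.1)  # stand-in for real work
    # ... release resources here ...
    kill_event.clear()   # signal back that cleanup finished

if __name__ == "__main__":
    kill = Event()
    p = Process(target=worker, args=(kill,))
    p.start()
    time.sleep(0.5)
    kill.set()                # request shutdown
    while kill.is_set():      # wait for the worker's acknowledgement
        time.sleep(0.1)
    p.terminate()             # now safe to terminate
    p.join()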
Exemplo n.º 39
0
class WalletCliManager(ProcessManager):
    fail_to_connect_str = "wallet failed to connect to daemon"

    def __init__(self,
                 resources_path,
                 wallet_file_path,
                 wallet_log_path,
                 restore_wallet=False,
                 restore_height=0):
        if not restore_wallet:
            wallet_args = u'%s/bin/ryo-wallet-cli --daemon-address %s --generate-new-wallet=%s --log-file=%s ' \
                                                % (resources_path, REMOTE_DAEMON_ADDRESS, wallet_file_path, wallet_log_path)
        else:
            restore_height = 0
            wallet_args = u'%s/bin/ryo-wallet-cli --daemon-address %s --log-file=%s --restore-deterministic-wallet --create-address-file --restore-height %d' \
                                                % (resources_path, "fakehost", wallet_log_path, restore_height)
        ProcessManager.__init__(self, wallet_args, "ryo-wallet-cli")
        self.ready = Event()
        self.last_error = ""

    def run(self):
        is_ready_str = "Background refresh thread started"
        err_str = "Error:"
        for line in iter(self.proc.stdout.readline, b''):
            if not self.ready.is_set() and is_ready_str in line:
                self.ready.set()
                log("Wallet ready!", LEVEL_INFO, self.proc_name)
            elif err_str in line:
                self.last_error = line.rstrip()
                log("[%s]>>> %s" % (self.proc_name, line.rstrip()),
                    LEVEL_ERROR, self.proc_name)
            else:
                log("[%s]>>> %s" % (self.proc_name, line.rstrip()),
                    LEVEL_DEBUG, self.proc_name)

        if not self.proc.stdout.closed:
            self.proc.stdout.close()

    def is_ready(self):
        return self.ready.is_set()

    def is_connected(self):
        self.send_command("refresh")
        if self.fail_to_connect_str in self.last_error:
            return False
        return True

    def stop(self):
        if self.is_proc_running():
            self.send_command('exit')
            #self.proc.stdin.close()
            counter = 0
            while True:
                if self.is_proc_running():
                    if counter < 10:
                        if counter == 2:
                            try:
                                self.send_command('exit')
                            except:
                                pass
                        sleep(1)
                        counter += 1
                    else:
                        self.proc.kill()
                        log("[%s] killed" % self.proc_name, LEVEL_INFO,
                            self.proc_name)
                        break
                else:
                    break
        log("[%s] stopped" % self.proc_name, LEVEL_INFO, self.proc_name)
Exemplo n.º 40
0
class MainWindow(QMainWindow):
    """ sets up the main window"""
    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        self.setWindowTitle("Spectrum Sense")

        self.view = view = ViewWidget()
        self.grid = grid = GridWidget()
        self.gps = gps = GPSWidget()
        self.textUpdate = textUpdate = QTextBrowser()

        tabwidget = QTabWidget()
        tabwidget.addTab(view, 'View')
        tabwidget.addTab(grid, 'Grid')
        tabwidget.addTab(gps, 'GPS')

        splitter = QSplitter(Qt.Vertical)
        splitter.addWidget(tabwidget)
        splitter.addWidget(textUpdate)
        splitter.setStretchFactor(0, 3)
        splitter.setStretchFactor(1, 1)

        self.setCentralWidget(splitter)

        view.startRecord.connect(self.record)
        grid.startRecord.connect(self.record)
        gps.startRecord.connect(self.record)
        gps.noGpsdevice.connect(
            lambda: self.textUpdate.append('[GPS] No device found.'))
        gps.noPositionLock.connect(lambda: self.textUpdate.append(
            '[GPS] Position lock not obtained.'))

        view.startPlot.connect(self.plot)

        self.radio = radio = RadioStreamer()

        self.plotEvent = Event()
        self.threadpool = QThreadPool()
        self.dataqueue = Queue()
        self.threadpool.setMaxThreadCount(2)

    def plot(self, params):
        """ method for plotting"""
        if not self.plotEvent.is_set():
            self.plotEvent.set()
        self.threadpool.waitForDone()
        self.plotEvent.clear()

        self.radio.setup(params, self.plotEvent)
        self.view.canvas.initPlot(
            np.linspace(params['centerFrequency'] - params['bandwidth'] / 2,
                        params['centerFrequency'] + params['bandwidth'] / 2,
                        params['fftsize']), np.zeros(params['fftsize']))

        task1 = Worker(self.radio.stream, self.dataqueue, params['fftsize'],
                       params['window'], 0.3)
        task2 = Worker(self.view.canvas.updatePlot, self.dataqueue)

        self.threadpool.start(task1)
        self.threadpool.start(task2)

    def record(self, params, id):
        """ method for recording"""
        if not self.plotEvent.is_set():
            self.plotEvent.set()
        self.threadpool.waitForDone()
        self.plotEvent.clear()
        if id == 'VIEW':
            self.textUpdate.append('[VIEW] Recording requested.')
            filename = 'data/static/capture.bin'
            self.radio.setup(params, self.plotEvent)
            try:
                self.radio.record(
                    filename, int(params['recordTime'] * params['bandwidth']))
                self.textUpdate.append(
                    '[VIEW] Recording finished. Data Saved to:{}'.format(
                        filename))
            except Exception:
                self.textUpdate.append(
                    '[VIEW] Overflow occurred. Captured data may not be valid: {}'
                    .format(filename))
            self.plot(params)
        if id == 'MAP':
            self.textUpdate.append('[MAP] Recording requested.')
            lat = params['position'][0]
            lng = params['position'][1]
            filename = 'data/geo/lat_' + str(lat) + 'lng_' + str(lng) + '.bin'
            self.radio.setup(params, self.plotEvent)
            try:
                self.radio.record(
                    filename, int(params['recordTime'] * params['bandwidth']))
                self.textUpdate.append(
                    '[MAP] Recording finished. Data Saved to:{}'.format(
                        filename))
            except Exception:
                self.textUpdate.append(
                    '[MAP] Overflow occurred. Captured data may not be valid: {}'
                    .format(filename))

        if id == 'GRID':
            self.textUpdate.append('[GRID] Recording requested.')
            x = params['position'][0]
            y = params['position'][1]
            step = min(params['step'])
            filename = 'data/grid/x_' + str(x) + 'y_' + str(y) + '.bin'
            self.radio.setup(params, self.plotEvent)
            try:
                self.radio.record(
                    filename, int(params['recordTime'] * params['bandwidth']))
                self.textUpdate.append(
                    '[GRID] Recording finished. Data Saved to:{}'.format(
                        filename))
            except Exception:
                self.textUpdate.append(
                    '[GRID] Overflow occurred. Captured data may not be valid: {}'
                    .format(filename))
            task = Worker(self.grid.canvas.updatePlot, x, y, step,
                          params['recordTime'], params['bandwidth'], filename)
            self.threadpool.start(task)

    def closeEvent(self, event):
        """ To handle the event close"""
        event.ignore()
        self.plotEvent.set()
        self.threadpool.waitForDone()
        event.accept()
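Both plot() and record() above follow the same restart pattern: set the shared plotEvent so running workers exit, wait for the thread pool to drain, clear the event, then launch new workers. A stdlib-only sketch of that pattern using threading; the producer body is a placeholder for the radio stream.

import threading
import queue
import time

def producer(stop_event, out_q):
    # Stand-in for the radio/FFT worker: push data until asked to stop.
    while not stop_event.is_set():
        out_q.put(time.time())
        time.sleep(0.05)

def restart_workers(stop_event, workers, make_workers):
    # Signal the current workers, wait for them, reset the event, relaunch.
    stop_event.set()
    for w in workers:
        w.join()
    stop_event.clear()
    new_workers = make_workers(stop_event)
    for w in new_workers:
        w.start()
    return new_workers

if __name__ == "__main__":
    stop = threading.Event()
    data = queue.Queue()
    workers = [threading.Thread(target=producer, args=(stop, data))]
    for w in workers:
        w.start()
    time.sleep(0.2)
    workers = restart_workers(
        stop, workers,
        lambda ev: [threading.Thread(target=producer, args=(ev, data))])
    time.sleep(0.2)
    stop.set()
    for w in workers:
        w.join()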
class MultiThreadedAugmenter(object):
    """ Makes your pipeline multi threaded. Yeah!
    If seeded we guarantee that batches are returned in the same order and with the same augmentation every time this
    is run. This is realized internally by using one queue per worker and querying the queues one after the other
    (a stand-alone sketch of this queue-per-worker scheme follows the class).
    Args:
        data_loader (generator or DataLoaderBase instance): Your data loader. Must have a .next() function and return
        a dict that complies with our data structure
        transform (Transform instance): Any of our transformations. If you want to use multiple transformations then
        use our Compose transform! Can be None (in that case no transform will be applied)
        num_processes (int): number of processes
        num_cached_per_queue (int): number of batches cached per process (each process has its own
        multiprocessing.Queue). We found 2 to be ideal.
        seeds (list of int): one seed for each worker; its length must equal num_processes.
        If None then seeds = range(num_processes)
        pin_memory (bool): set to True if all torch tensors in data_dict are to be pinned. Pytorch only.
    """
    def __init__(self,
                 data_loader,
                 transform,
                 num_processes,
                 num_cached_per_queue=2,
                 seeds=None,
                 pin_memory=False):
        self.pin_memory = pin_memory
        self.transform = transform
        if seeds is not None:
            assert len(seeds) == num_processes
        else:
            seeds = [None] * num_processes
        self.seeds = seeds
        self.generator = data_loader
        self.num_processes = num_processes
        self.num_cached_per_queue = num_cached_per_queue
        self._queues = []
        self._processes = []
        self._end_ctr = 0
        self._queue_loop = 0
        self.pin_memory_thread = None
        self.pin_memory_queue = None
        self.abort_event = Event()

    def __iter__(self):
        return self

    def next(self):
        return self.__next__()

    def _next_queue(self):
        r = self._queue_loop
        self._queue_loop += 1
        if self._queue_loop == self.num_processes:
            self._queue_loop = 0
        return r

    def __get_next_item(self):
        success = False
        item = None

        use_this_queue = self._next_queue()

        while not success:
            try:
                if self.abort_event.is_set():
                    self._finish()
                    raise RuntimeError(
                        "MultiThreadedAugmenter.abort_event was set, something went wrong. Maybe one of "
                        "your workers crashed")
                else:
                    if not self.pin_memory:
                        item = self._queues[use_this_queue].get(timeout=2)
                        success = True
                    else:
                        item = self.pin_memory_queue.get(timeout=2)
                        success = True
            except Empty:
                pass

        return item

    def __next__(self):
        if len(self._queues) == 0:
            self._start()
        try:
            item = self.__get_next_item()

            while isinstance(item, str) and (item == "end"):
                self._end_ctr += 1
                if self._end_ctr == self.num_processes:
                    self._end_ctr = 0
                    self._queue_loop = 0
                    logging.debug(
                        "MultiThreadedGenerator: finished data generation")
                    raise StopIteration

                item = self.__get_next_item()

            return item

        except KeyboardInterrupt:
            logging.error(
                "MultiThreadedGenerator: caught exception: {}".format(
                    sys.exc_info()))
            self._finish()
            raise KeyboardInterrupt

    def _start(self):
        if len(self._processes) == 0:
            self.abort_event.clear()

            logging.debug("starting workers")
            self._queue_loop = 0
            self._end_ctr = 0

            if hasattr(self.generator, 'was_initialized'):
                self.generator.was_initialized = False

            for i in range(self.num_processes):
                self._queues.append(Queue(self.num_cached_per_queue))
                self._processes.append(
                    Process(target=producer,
                            args=(self._queues[i], self.generator,
                                  self.transform, i, self.seeds[i],
                                  self.abort_event)))
                self._processes[-1].daemon = True
                self._processes[-1].start()

            if self.pin_memory:
                import torch
                self.pin_memory_queue = thrQueue(2)
                self.pin_memory_thread = threading.Thread(
                    target=pin_memory_loop,
                    args=(self._queues, self.pin_memory_queue,
                          self.abort_event, torch.cuda.current_device()))
                self.pin_memory_thread.daemon = True
                self.pin_memory_thread.start()
        else:
            logging.debug(
                "MultiThreadedGenerator Warning: start() has been called but workers are already running"
            )

    def _finish(self):
        self.abort_event.set()
        sleep(0.2)  # allow pin memory thread to finish
        if len(self._processes) != 0:
            logging.debug("MultiThreadedGenerator: workers terminated")
            for i, p in enumerate(self._processes):
                p.terminate()

                self._queues[i].close()
                self._queues[i].join_thread()

            self._queues = []
            self._processes = []
            self._queue = None
            self._end_ctr = 0
            self._queue_loop = 0

    def restart(self):
        self._finish()
        self._start()

    def __del__(self):
        logging.debug("MultiThreadedGenerator: destructor was called")
        self._finish()
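The docstring above describes the core idea: one multiprocessing queue per worker, queried round-robin, so batch order is deterministic when seeded. A stdlib-only sketch of that round-robin scheme, independent of this class; the batch strings are placeholders and every worker is assumed to produce the same number of batches.

from multiprocessing import Process, Queue

def producer(q, worker_id, n_batches=3):
    # Each worker fills only its own queue, then posts an "end" marker.
    for i in range(n_batches):
        q.put("worker%d-batch%d" % (worker_id, i))
    q.put("end")

if __name__ == "__main__":
    num_workers = 2
    queues = [Queue(maxsize=2) for _ in range(num_workers)]
    procs = [Process(target=producer, args=(q, i)) for i, q in enumerate(queues)]
    for p in procs:
        p.start()

    ended, turn = 0, 0
    while ended < num_workers:
        item = queues[turn].get()            # round-robin keeps ordering stable
        turn = (turn + 1) % num_workers
        if item == "end":
            ended += 1
            continue
        print(item)

    for p in procs:
        p.join()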
Exemplo n.º 42
0
class TankWorker():
    SECTION = 'core'
    FINISH_FILENAME = 'finish_status.yaml'
    DEFAULT_CONFIG = 'load.yaml'

    def __init__(self,
                 configs,
                 cli_options=None,
                 cfg_patches=None,
                 cli_args=None,
                 no_local=False,
                 log_handlers=None,
                 wait_lock=False,
                 files=None,
                 ammo_file=None,
                 api_start=False):
        self.api_start = api_start
        self.wait_lock = wait_lock
        self.log_handlers = log_handlers if log_handlers is not None else []
        self.files = [] if files is None else files
        self.ammo_file = ammo_file
        self.interrupted = ProcessEvent() if api_start else ThreadEvent()
        self.info = TankInfo(Manager().dict()) if api_start else TankInfo(
            dict())
        self.config_list = self._combine_configs(configs, cli_options,
                                                 cfg_patches, cli_args,
                                                 no_local)
        self.core = TankCore(self.config_list, self.interrupted, self.info)
        self.folder = self.init_folder()
        self.init_logging(debug=True)

        is_locked = Lock.is_locked(self.core.lock_dir)
        if is_locked and not self.core.config.get_option(
                self.SECTION, 'ignore_lock'):
            raise LockError(is_locked)

    @staticmethod
    def _combine_configs(run_cfgs,
                         cli_options=None,
                         cfg_patches=None,
                         cli_args=None,
                         no_local=False):
        if cli_options is None:
            cli_options = []
        if cfg_patches is None:
            cfg_patches = []
        if cli_args is None:
            cli_args = []
        run_cfgs = run_cfgs if len(run_cfgs) > 0 else [
            TankWorker.DEFAULT_CONFIG
        ]

        if no_local:
            configs = [load_cfg(cfg) for cfg in run_cfgs] + \
                parse_options(cli_options) + \
                parse_and_check_patches(cfg_patches) + \
                cli_args
        else:
            configs = [load_core_base_cfg()] + \
                load_local_base_cfgs() + \
                [load_cfg(cfg) for cfg in run_cfgs] + \
                parse_options(cli_options) + \
                parse_and_check_patches(cfg_patches) + \
                cli_args
        return configs

    def init_folder(self):
        folder = self.core.artifacts_dir
        if self.api_start > 0:
            for f in self.files:
                shutil.move(f, folder)
            if self.ammo_file:
                shutil.move(self.ammo_file, folder)
            os.chdir(folder)
        return folder

    def stop(self):
        self.interrupted.set()
        logger.warning('Interrupting')

    def get_status(self):
        return {
            'status_code': self.status,
            'left_time': None,
            'exit_code': self.retcode,
            'lunapark_id': self.get_info('uploader', 'job_no'),
            'tank_msg': self.msg,
            'lunapark_url': self.get_info('uploader', 'web_link'),
            'luna_id': self.get_info('neuploader', 'job_no'),
            'luna_url': self.get_info('neuploader', 'web_link')
        }

    def save_finish_status(self):
        with open(os.path.join(self.folder, self.FINISH_FILENAME), 'w') as f:
            yaml.safe_dump(self.get_status(),
                           f,
                           encoding='utf-8',
                           allow_unicode=True)

    def get_info(self, section_name, key_name):
        return self.info.get_value([section_name, key_name])

    def init_logging(self, debug=False):

        filename = os.path.join(self.core.artifacts_dir, 'tank.log')
        open(filename, 'a').close()
        current_file_mode = os.stat(filename).st_mode
        os.chmod(
            filename,
            current_file_mode | stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)

        logger.handlers = []
        logger.setLevel(logging.DEBUG if debug else logging.INFO)

        file_handler = logging.FileHandler(filename)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(
            logging.Formatter(
                "%(asctime)s [%(levelname)s] %(name)s %(filename)s:%(lineno)d\t%(message)s"
            ))
        file_handler.addFilter(TankapiLogFilter())
        logger.addHandler(file_handler)
        logger.info("Log file created")

        for handler in self.log_handlers:
            logger.addHandler(handler)
            logger.info("Logging handler {} added".format(handler))

    def get_lock(self):
        while not self.interrupted.is_set():
            try:
                lock = Lock(self.test_id, self.folder).acquire(
                    self.core.lock_dir,
                    self.core.config.get_option(self.SECTION, 'ignore_lock'))
                break
            except LockError as e:
                self.upd_msg(e.message)
                if not self.wait_lock:
                    raise RuntimeError("Lock file present, cannot continue")
                logger.warning("Couldn't get lock. Will retry in 5 seconds...")
                time.sleep(5)
        else:
            raise KeyboardInterrupt
        return lock

    def upd_msg(self, msg):
        if msg:
            self.msg = self.msg + '\n' + msg
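_combine_configs above only builds an ordered list of config mappings: base config, local overrides, run configs, CLI options, patches, and raw CLI args, with later entries expected to take precedence when TankCore merges them. A small sketch of that layered-merge idea; the merge function is illustrative, the real merging lives in TankCore.

def merge_layers(layers):
    # Later layers win over earlier ones, mirroring the ordering built by
    # _combine_configs (base -> local -> run cfgs -> cli options -> patches -> cli args).
    merged = {}
    for layer in layers:
        for section, options in layer.items():
            merged.setdefault(section, {}).update(options)
    return merged

base = {"core": {"lock_dir": "/var/lock", "ignore_lock": False}}
run_cfg = {"core": {"artifacts_base_dir": "./logs"}}
cli = {"core": {"ignore_lock": True}}
print(merge_layers([base, run_cfg, cli]))
# {'core': {'lock_dir': '/var/lock', 'ignore_lock': True, 'artifacts_base_dir': './logs'}}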
class SiteChecker(FeedbackInterface, SiteTempDataSrcRefInterface, ProgressLogInterface, ExternalTempInterface):
    full_link_key = "full_link"
    datasource_key = "data_source"
    controller_ley = "controller"
    max_level_key = "max_level"
    max_page_key = "max_page"
    output_queue_key = "output_queue"

    _use_lxml_parser = False

    def __init__(self, full_link: str="", data_source: SiteTempDataSrcInterface=None,
                 controller: SiteCheckerController=None,
                 max_level=10, max_page=1000, delegate=None, output_buff_size=2000,
                 output_queue=None, output_all_external=False, result_delegate=None,
                 memory_control_terminate_event=None, check_robot_text=True,
                 **kwargs):
        """
        :param full_link: The full link of a domain, e.g: https://www.google.co.uk
        :param domain: domain to crawl
        :param max_level: stop crawling if it reaches this level
        :param max_page: maximum pages to check within a site, also stop crawling
        :param delegate: if this is not None, the checker will send it the latest results for external domains whose ResponseCode is 404 or 999
        :param result_delegate: send site_info upon finish
        :param memory_control_terminate_event: if this is not None and being set, it will be able to terminate an external memory controlled process.
        :return:
        """
        FeedbackInterface.__init__(self, **kwargs)
        #super(SiteChecker, self).__init__(**kwargs)
        if full_link is None or len(full_link) == 0:
            raise ValueError()

        original_path = ""
        try:
            paras = urlsplit(full_link)
            self.scheme, self.domain, original_path = paras[0], paras[1], paras[2]
        except:
            pass

        domain_data = LinkChecker.get_root_domain(full_link, False)
        self.root_domain = domain_data[1]
        self.sub_domain = domain_data[4]
        self.domain_suffix = domain_data[5]
        self.sub_domain_no_local = self.sub_domain.strip(self.domain_suffix)
        if self.scheme == "":
            self.scheme = "http"
        if self.domain == "":
            self.domain = self.root_domain
        self.orginal_link = full_link
        self.domain_link = LinkChecker.get_valid_link(self.root_domain, full_link, self.scheme)
        self.max_level = max_level
        self.max_page = max_page
        self.page_count = 0  # keep track page done
        self._page_count_shadow = 0 # track previous count
        self._all_page_count_shadow = 0 #track previous count in datasource
        self.internal_page_count = 0
        self.internal_page_last_count = 0
        self.page_allocated = 0
        self.current_level = 0  # if this = 0, it is root domain/home_page
        self._stop_event = Event()
        valid_file_name = SiteTempDataSrcInterface.get_valid_file_name(self.domain_link)
        self._external_db_buffer = ExternalTempDataDiskBuffer(valid_file_name+".ext.db", self,
                                                              stop_event=self._stop_event,
                                                              buf_size=int(output_buff_size/2),
                                                              dir_path=get_db_buffer_default_dir(),
                                                              convert_output=False)
        self._external_db_buffer.append_to_buffer([(self.root_domain, ResponseCode.DNSError),], convert_tuple=False)
        self._memory_control_terminate_event = memory_control_terminate_event
        self.task_control_lock = threading.RLock()
        if data_source is None:
            #self.data_source = SiteTempDataDisk(self.root_domain, ref_obj=self)
            self.data_source = SiteTempDataDiskWithBuff(ref=self.domain_link, output_buff_size=output_buff_size, ref_obj=self)
        else:
            self.data_source = data_source  # a list of OnSiteLink
        self.delegate = delegate
        if LinkChecker.might_be_link_html_page(original_path):
            self.data_source.append(OnSiteLink(self.domain_link, response_code=ResponseCode.LinkOK, link_level=1)) # add the root domain as a starting point
        self.data_source.append(OnSiteLink(self.scheme + "://www."+self.sub_domain, ResponseCode.LinkOK, link_level=1))
        self.data_source.append(OnSiteLink(self.scheme + "://" + self.domain, ResponseCode.LinkOK, link_level=1))
        self.cache_list = []  # internal page cache
        self.page_need_look_up_temp = 0
        self.cache_list.append(self.domain_link)
        if "www." not in self.sub_domain:
            self.cache_list.append(self.scheme + "://www."+self.sub_domain)
        self.cache_list.append(self.scheme + "://" + self.domain)
        self.page_need_look_up = self.data_source.count_all()
        self.cache_size = 500  # create a small cache list to avoid going to check link in file system with lots of read and write
        self._double_check_cache_lock = threading.RLock()
        self._double_check_cache = deque(maxlen=self.cache_size)
        self.external_cache_list = []
        self.external_cache_size = 500  # cache that hold external sites
        self.external_links_checked = 0
        self.add_internal_page_OK_only = True
        self.output_queue = output_queue
        self.output_all_external = output_all_external
        self.controller = controller
        self.result_delegate = result_delegate
        self.page_count_lock = threading.RLock()
        self.internal_page_count_lock = threading.RLock()
        self.level_lock = threading.RLock()
        self.page_look_up_lock = threading.RLock()
        self.external_link_check_lock = threading.RLock()
        self._finihsed = False
        self.task_control_max = 1
        self.agent = "VegeBot (we follow your robots.txt settings before crawling, you can slow down the bot by change the Crawl-Delay parameter in the settings." \
                     "if you have an enquiry, please email to: [email protected])"
        self.agent_from = "*****@*****.**"
        if check_robot_text:
            self.robot_agent = LinkChecker.get_robot_agent(self.sub_domain, protocol=self.scheme)
        else:
            self.robot_agent = None
        self.site_crawl_delay = 0.60

        if isinstance(self.robot_agent, Rules):
            delay_temp = self.robot_agent.delay(self.agent)
            if delay_temp is not None and delay_temp != self.site_crawl_delay:
                self.site_crawl_delay = delay_temp

        self.task_control_counter = 1
        self._speed_penalty_count = 0
        self._speed_penalty_threshold = 10
        self._progress_logging_speed = 120
        self._output_period = 120
        self._output_batch_size = 100
        self._death_wish_sent = False
        SiteChecker._is_lxml_parser_exist()
        self._output_thread = None
        self._output_queue = None
        self.progress_logger = ProgressLogger(self._progress_logging_speed, self, self._stop_event)
        self._status = "Start"
        self._populate_with_state()  # restore last known state
        # self.data_source.additional_startup_procedures()  # use the data set in self._populate_with_state() to start

    # def _empty_external_links_db(self):
    #     if self.output_queue is not None:
    def _put_result_in_output_queue_loop(self, item_list: list):
        if not self._stop_event.is_set():
            try:
                self._output_queue.put(item_list, True, 2)
            except Exception as ex:
                if self._output_queue is None:
                    manager, self._output_queue = get_queue_client(QueueManager.MachineSettingCrawler,
                                                             QueueManager.Method_Whois_Input)
                time.sleep(0.1)
                ErrorLogger.log_error("SiteChecker._get_external_links_to_queue", self.sub_domain+" "+str(ex))
                self._put_result_in_output_queue_loop(item_list)

    def _get_external_links_to_queue(self):
        ref_time = time.time()
        manager, self._output_queue = get_queue_client(QueueManager.MachineSettingCrawler, QueueManager.Method_Whois_Input)
        self.output_queue = self._output_queue  # override output_queue
        # if result_queue is None:
        #     ErrorLogger.log_error("SiteChecker._get_external_links_to_queue()", ValueError("result queue is none, cannot put item in queue."))
        # else:
        batch = list()
        counter = 0
        for item in self._external_db_buffer:
            if self._stop_event.is_set() or self.external_links_checked >= self._external_db_buffer.count_all():
                try:
                    manager.shutdown()
                except:
                    pass
                finally:
                    break
            elif isinstance(item, tuple):
                # print("outputting item: ", str(item))
                batch.append((item[0], item[1]))
                counter += 1
            if len(batch) > 0:
                current_time = time.time()
                if current_time - ref_time > self._output_period or len(batch) >= self._output_batch_size:
                    self._put_result_in_output_queue_loop(batch)
                    self.external_links_checked += len(batch)
                    ref_time = time.time()
                    batch.clear()

            time.sleep(0.0001)

    @staticmethod
    def _is_lxml_parser_exist():
        try:
            import lxml

        except ImportError:
            SiteChecker._use_lxml_parser = False
        else:
            SiteChecker._use_lxml_parser = True

    def use_lxml_parser(self):
        return SiteChecker._use_lxml_parser

    @staticmethod
    def get_input_parameter_base(full_link: str, max_page: int, max_level: int, output_queue) -> dict:
        return {SiteChecker.full_link_key: full_link, SiteChecker.max_page_key: max_page,
                SiteChecker.max_level_key: max_level, SiteChecker.output_queue_key: output_queue}

    def get_external_count_finished(self) -> int:
        """
        ExternalTempInterface, get the number of job done in ExternalTempDataDiskBuffer
        :return:
        """
        return self.external_links_checked

    def set_internal_count(self, count: int):
        """
        ExternalTempInterface, set the number of job done in ExternalTempDataDiskBuffer
        :param count:
        :return:
        """
        self.external_links_checked = count

    def _set_task_control_max(self, concurrent_task: int):
        if concurrent_task <= 0:
            raise ValueError
        self.task_control_max = concurrent_task
        self.task_control_counter = concurrent_task
        min_page_per_s = concurrent_task/20
        self._speed_penalty_threshold = self._progress_logging_speed * min_page_per_s
        if self.site_crawl_delay > 1/min_page_per_s:
            ErrorLogger.log_error("SiteChecker._set_task_control_max()",
                                  ValueError("site has crawl delay greater than mas delay."), self.domain_link)
            self._status = "Stopped"
            self.sudden_death()

    def get_site_feedback(self) -> SeedSiteFeedback:
        return SeedSiteFeedback(self.orginal_link, page_count=self.get_page_need_look_up())

    def get_site_info(self) -> SiteInfo:  # keep the original reference when sending back the site information
        info = SiteInfo(self.orginal_link, self.data_source)
        return info

    def populate_with_state(self, state):
        if state is not None and isinstance(state, SiteCheckerState):
            self._status = "Restarted"
            self.page_count = state.page_count
            self.page_allocated = state.page_count
            self.internal_page_count = state.internal_page_count
            self.internal_page_last_count = state.internal_page_count
            self.external_links_checked = state.external_page_count
            self._external_db_buffer.set_progress(state.external_page_count)
            self.page_need_look_up = state.page_need_look_up
            self.current_level = state.current_level
            self.progress_logger.set_reference(state.log_sample_index, state.log_started_time)
            counter = 0
            if self.data_source is not None:
                try:
                    for item in self.data_source.get_next():
                        if counter >= self.cache_size:
                            break
                        if isinstance(item, OnSiteLink) and not LinkChecker.is_external_link(self.root_domain, item.link):
                            self.cache_list.append(item.link)
                            # print("--restore: ", item)
                            counter += 1
                except Exception as ex:
                    msg = "error in SiteChecker.populate_with_state(), trying to populate cache, " + self.root_domain
                    ErrorLogger.log_error("SiteChecker", ex, msg)

                self.data_source.ref = state.datasource_ref
                self.data_source.output_c = state.datasource_output_c
                self.data_source.set_progress(state.datasource_index if state.datasource_index < state.page_count else state.page_count)
                self.data_source.set_continue_lock(True)

    def get_file_name(self):
        return self.data_source.ref

    def get_limit(self):
        return 100000

    def get_column_names(self):
        return ["Page Index", "External", "All", "Status"]

    def get_progress(self):
        data_source_count = self.data_source.count_all()
        if self.page_count - self._page_count_shadow <= self._speed_penalty_threshold:  # determine if site is slow
            self._speed_penalty_count += 1
            if self._speed_penalty_count > 2:
                self._status = "Stopped"
                self.sudden_death()
        else:
            self._speed_penalty_count = 0

        if self.page_count == self._page_count_shadow and data_source_count == self._all_page_count_shadow:  # determine if the site is stuck
            self._status = "Stopped"
            self.sudden_death()

        self._page_count_shadow = self.page_count
        self._all_page_count_shadow = data_source_count
        return [self.page_count, self.external_links_checked, data_source_count, self._status]

    def is_programme_finshed(self):
        return self._finihsed

    def get_callback_data(self):
        with self.page_count_lock:
            gap = self.internal_page_count - self.internal_page_last_count
            self.internal_page_last_count = self.internal_page_count
            seed_feedback = None
            if self._finihsed:
                seed_feedback = self.get_site_feedback()

        return SiteFeedback(gap, self._finihsed, seed_feedback=seed_feedback, datasource_ref=self.data_source.ref)

    def get_state(self):
        return SiteCheckerState(page_count=self.page_count, page_need_look_up=self.page_need_look_up,
                                current_level=self.current_level, internal_page_count=self.internal_page_count,
                                external_page_count= self.external_links_checked,
                                datasource_index=self.data_source.temp_counter,
                                datasource_output_c=self.data_source.output_c,
                                datasource_ref=self.data_source.ref, log_started_time=self.progress_logger.begin_time,
                                log_sample_index=self.progress_logger.limit_counter,)

    def additional_reset(self):
        pass

    def addtional_clear(self):
        pass

    def stop(self):
        # natural stop
        self._status = "Stopped"
        self.progress_logger.report_progress()
        self._stop_event.set()
        if self.progress_logger.is_alive():
            self.progress_logger.join()

    def clear(self):
        self.cache_list.clear()
        self.addtional_clear()

    def acquire_task(self, level: int, link: str):
        tasked_acquired = True
        if link.endswith('/'):
            temp = link
        else:
            temp = link + '/'
        with self.task_control_lock:
            if len(self._double_check_cache) > 0:
                if temp in self._double_check_cache:
                    print("duplicate link found:", link)
                    tasked_acquired = False
                else:
                    if len(self._double_check_cache) >= self.cache_size:
                        self._double_check_cache.popleft()
                    self._double_check_cache.append(temp)
            self.task_control_counter -= 1
            self.page_allocated += 1
            if tasked_acquired:
                if level > self.current_level:
                    self.current_level = level
            # time.sleep(self.site_crawl_delay)
        return tasked_acquired

    def release_task(self, new_page: int):
        with self.task_control_lock:
            if self.page_need_look_up == 1 and new_page == 0:
                PrintLogger.print("set to stop data source")
                self.data_source.set_continue_lock(False)
            else:
                self.page_count += 1
                self.page_need_look_up += new_page
                #self.external_links_checked += external_page_count
                self.task_control_counter += 1
                # was determine if it is internal or external page
                self.internal_page_count += 1
                if self.internal_page_count > self.max_page or self.current_level > self.max_level:
                    if self.data_source.can_continue():
                        PrintLogger.print("set stop: " + str(self.internal_page_count)+" level: "+str(self.current_level))
                        self.data_source.set_continue_lock(False)

    def get_page_count(self):
        with self.page_count_lock:
            page_count = self.page_count
        return page_count

    def set_page_count(self, page_count: int):
        with self.page_count_lock:
            self.page_count = page_count

    def set_internal_page_count(self, count: int):
        with self.internal_page_count_lock:
            self.internal_page_count += count

    def get_internal_page_count(self):
        with self.internal_page_count_lock:
            count = self.internal_page_count
        return count

    def get_current_level(self):
        with self.level_lock:
            current_level = self.current_level
        return current_level

    def set_current_level(self, level):
        with self.level_lock:
            self.current_level = level

    def get_page_need_look_up(self):
        with self.page_look_up_lock:
            page_look_up = self.page_need_look_up
        #self.page_look_up_lock.release()
        return page_look_up

    def set_page_need_look_up(self, page_count):
        with self.page_look_up_lock:
            #time.sleep(0.1)
            self.page_need_look_up = page_count
        # self.page_look_up_lock.release()

    def set_page_need_look_up_plus_more(self, count: int):
        with self.page_look_up_lock:
            self.page_need_look_up += count

    def get_internal_page_progress_index(self)->int:
        return self.get_page_count()

    def set_internal_page_progress_index(self, index: int):
        self.page_count = index
        self.page_allocated = index

    def is_idle(self):
        idle = False
        with self.task_control_lock:
            page_need_look_up = self.get_page_need_look_up()
            new_task_added = page_need_look_up - self.page_need_look_up_temp
            has_new_task = True if new_task_added > 0 else False
            #page_count = self.get_page_count()
            if has_new_task:
                self.page_need_look_up_temp = page_need_look_up
            else:
                if self.task_control_counter >= self.task_control_max:
                    idle = True
                #     print("is idle")
                # else:
                #     print("is working")
        return idle

    def add_link_to_cache(self, link):
        if len(self.cache_list) > self.cache_size:
            return
        else:
            if link.endswith('/'):
                self.cache_list.append(link)
            else:
                self.cache_list.append(link+'/')

    def is_link_in_cache(self, link):
        if link.endswith('/'):
            temp = link
        else:
            temp = link + '/'
        return True if temp in self.cache_list else False

    def reset_as(self, domain: str, link: str=""):  # reset the target domain
        PrintLogger.print("crawl reset as: "+domain)
        self.domain = domain
        self.domain_link = self.scheme + "://" + self.domain
        self.page_count = 0
        self.current_level = 0
        self.set_page_need_look_up(1)
       # self.set_page_looked_up(0)
        self.clear()
        if len(link) == 0:
            self.cache_list.append(self.domain_link)
            self.data_source.re_target(self.domain_link, OnSiteLink(self.domain_link, response_code=ResponseCode.LinkOK, link_level=1))
            #self.data_source.append(OnSiteLink(self.domain_link, response_code=ResponseCode.LinkOK, link_level=1))
        else:
            self.cache_list.append(link)
            self.data_source.re_target(link, OnSiteLink(link, response_code=ResponseCode.LinkOK, link_level=1))
            #self.data_source.append(OnSiteLink(link, response_code=ResponseCode.LinkOK, link_level=1))
        self.additional_reset()
        self.data_source.additional_startup_procedures()

    def crawling(self):  # call this method to start operation
        self._start_sending_feedback()
        self._output_thread = threading.Thread(target=self._get_external_links_to_queue)
        if self.data_source.can_continue():
            self.data_source.additional_startup_procedures()  # use the data set in self._populate_with_state() to start
            self._external_db_buffer.start_input_output_cycle()
            self._output_thread.start()
            self.progress_logger.start()
            self.progress_logger.report_progress()  # log first row
            self._status = "Work"
            self.begin_crawl()
            # prefix = "www."
            # page_count_limit = 2
            # if self.page_count <= page_count_limit and prefix not in self.domain_link:
            #     new_domain = prefix + self.sub_domain
            #     self.reset_as(new_domain)
            #     self._status = "Work"
            #     self.begin_crawl()
            # print("going to stop all.")
            self.stop()
            self.clear()

            self.data_source.additional_finish_procedures()
            # print("going to finish output buffer.")
            self._external_db_buffer.terminate()
            # print("going to stop output_thread.")
            if self._output_thread.is_alive():
                self._output_thread.join()
        PrintLogger.print("finished naturally: "+self.domain_link)
        # print("finished naturally.")
        self._finihsed = True
            #calling this at the end of operation
        PrintLogger.print("send last response")
        # print("send last response")
        # print("send last response.")
        self._end_sending_feedback()
        if self._memory_control_terminate_event is not None:
            self._memory_control_terminate_event.set()

    def sudden_death(self):
        if not self._finihsed:
            self._finihsed = True
            PrintLogger.print("start sudden death: "+self.orginal_link)
            #self.stop()
            self.stop()

            self.clear()
            self.data_source.set_continue_lock(False)
            self.data_source.additional_finish_procedures()
            self._external_db_buffer.terminate()
            if isinstance(self._output_thread, threading.Thread):
                if self._output_thread.is_alive():
                    self._output_thread.join()
                #calling this at the end of operation
            PrintLogger.print("send last response")
            self._end_sending_feedback()
            if self._memory_control_terminate_event is not None:
                ErrorLogger.log_error("SiteChecker", TimeoutError("slow processing speed, terminated."), self.orginal_link)
                self._memory_control_terminate_event.set()

    def begin_crawl(self, level=0):  # subclass this to make different behaviour
        pass
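acquire_task() above deduplicates links with a small bounded cache guarded by an RLock: links are normalised to a trailing slash and only the most recent entries are kept in a deque. A stdlib-only sketch of that dedup cache; the size and names are illustrative.

import threading
from collections import deque

class RecentLinkCache:
    # Bounded, thread-safe cache of recently seen links, normalised to a
    # trailing slash, mirroring the duplicate check in acquire_task() above.
    def __init__(self, size=500):
        self._lock = threading.RLock()
        self._cache = deque(maxlen=size)

    def seen_before(self, link):
        normalised = link if link.endswith("/") else link + "/"
        with self._lock:
            if normalised in self._cache:
                return True
            self._cache.append(normalised)  # maxlen evicts the oldest entry
            return False

cache = RecentLinkCache(size=3)
print(cache.seen_before("http://example.com/a"))   # False, first time seen
print(cache.seen_before("http://example.com/a/"))  # True, same normalised link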
class GpsReader(Process):
    def __init__(self, queue, log_file_path, test=False):
        super(GpsReader, self).__init__()
        self.queue = queue
        self.test = test
        self.exit = Event()

        self.speed = -1

        self.log_file_path = log_file_path

        if not test:
            self._setup_gps()
        self.last_timestamp = time.monotonic()

    def _setup_gps(self):

        uart = serial.Serial("/dev/ttyUSB0", baudrate=9600, timeout=3000)

        # Create a GPS module instance.
        self.gps = adafruit_gps.GPS(uart, debug=False)
        self.gps.send_command(b'PMTK314,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0')
        self.gps.send_command(b'PMTK220,500')

    def run(self):
        print("Running GPS Reader")

        with open(self.log_file_path, "w") as log_file:
            self.csv_writer = csv.writer(log_file)
            self.csv_writer.writerow(["Speed"])  #write header

            while not self.exit.is_set():
                if not self.test:
                    self._update_speed()
                else:
                    self._update_fake_speed()

                if self.speed >= 0:  #speed has been set
                    self._log_speed()

    def _log_speed(self):
        self.csv_writer.writerow([self.speed])

        try:
            self.queue.put(self.speed, True, 0.1)
        except Q.Full:
            pass  #Don't let a full queue cause an issue

    def _update_speed(self):
        # Make sure to call gps.update() every loop iteration and at least twice
        # as fast as data comes from the GPS unit (usually every second).
        # This returns a bool that's true if it parsed new data (you can ignore it
        # though if you don't care and instead look at the has_fix property).
        self.gps.update()
        current = time.monotonic()
        if current - self.last_timestamp >= 1.0:
            self.last_timestamp = current
            if not self.gps.has_fix:
                # Try again if we don't have a fix yet
                print('Waiting for fix...')
                self.speed = -1
                return

            # We have a fix! (gps.has_fix is true)
            if self.gps.track_angle_deg is not None:
                #convert knots to mph
                self.speed = self.gps.speed_knots * 1.15078

    def _update_fake_speed(self):
        current = time.monotonic()
        if current - self.last_timestamp >= 0.1:
            self.last_timestamp = current
            self.speed = random.randint(0, 50)

    def shutdown(self):
        print("Shutting Down GPS Reader")
        self.exit.set()
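A hedged usage sketch for the reader above: test=True keeps the serial/GPS setup out of the picture and produces fake speeds, and GpsReader is assumed to be importable from the module shown in this listing.

from multiprocessing import Queue
import queue as Q  # only needed for the Empty exception

if __name__ == "__main__":
    speeds = Queue(maxsize=10)
    reader = GpsReader(speeds, "speed_log.csv", test=True)  # fake speeds, no serial device needed
    reader.start()
    try:
        for _ in range(20):
            try:
                print("speed (mph):", speeds.get(True, 0.5))
            except Q.Empty:
                pass  # no sample within the timeout; keep polling
    finally:
        reader.shutdown()  # sets the exit event; the run() loop then returns
        reader.join()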
Exemplo n.º 45
0
class IPRPCChannel(Process):
    """
    An IPRPC (InterPlanetary Remote Procedure Call) channel is the base unit
    of communication in pillar. It allows two nodes to pass IPRPCMessages
    between each other using IPFS pubsub queues. It generates random channel
    IDs based on the peer fingerprints, time, and an optional pre-shared-key.

    The queue IDs will rotate every hour, and the channel listens on both
    the previous hour and next hours ID so no messages should be lost during
    the transition.

    This class is designed to be run as a subprocess, as such it is started
    with the .start() method, which calls the .run() method in a subprocess.
    Messages are received from and passed to this thread using the pipe
    endpoints returned by the .get_pipe_endpoints() method.
    """

    def __init__(self,
                 id: str,
                 peer_fingerprint: str = None,
                 encryption_helper: EncryptionHelper = None,
                 ipfs_instance: IPFSClient = None,
                 keepalive_send_interval: int = 30,
                 keepalive_timeout_interval: int = 60,
                 pre_shared_key: str = ''):
        self.id = id
        self.peer_id = peer_fingerprint
        self.pre_shared_key = pre_shared_key
        self.queues = []
        self.encryption_helper = encryption_helper
        self.ipfs = ipfs_instance or IPFSClient()
        self.our_ipfs_peer_id = None
        self.tx_input, self.tx_output = Pipe()
        self.rx_input, self.rx_output = Pipe()
        self.status = PeeringStatus.IDLE
        self.logger = logging.getLogger(self.__repr__())
        self.keepalive_send_interval = keepalive_send_interval
        self.keepalive_timeout_interval = keepalive_timeout_interval
        self.timeout = None
        self.keepalive_send_timeout = None
        self._establish_and_rotate_queues()
        self.shutdown_callback = Event()
        super().__init__()
        self.logger.info(
            f"Spawned channel between {self.id} and {self.peer_id}")
        self.logger.info(
            f"Initial channel window: {self.queues}"
        )

    def get_pipe_endpoints(self):
        """
        Returns the endpoints that allow messages to be sent and received
        from this thread.
        :return:
            tx_pipe, rx_pipe
        """
        return self.tx_input, self.rx_output

    def run(self) -> None:
        """
        This method should not be called directly, .start() will start a
        subprocess thread where this method will be called.
        :return:
        """
        while True:
            self.timeout = time.time() + self.keepalive_timeout_interval
            self.keepalive_send_timeout = time.time() + \
                self.keepalive_send_interval
            rx_workers = []
            asyncio.ensure_future(handler_loop(
                self._handle_establish_connection, sleep=5)
            )
            rx_workers.append(asyncio.ensure_future(
                handler_loop(
                    self._handle_messages_current_window,
                    sleep=.01
                )
            ))
            rx_workers.append(asyncio.ensure_future(
                handler_loop(
                    self._handle_messages_previous_window,
                    sleep=.01
                )
            ))
            rx_workers.append(asyncio.ensure_future(
                handler_loop(
                    self._handle_messages_next_window,
                    sleep=.01
                )
            ))
            asyncio.ensure_future(
                handler_loop(
                    self._handle_tx_queue_messages,
                    sleep=.01
                )
            )
            asyncio.ensure_future(
                handler_loop(
                    self._handle_keepalive,
                    sleep=5,
                )
            )
            asyncio.ensure_future(
                handler_loop(
                    self._handle_timeout,
                    sleep=5,
                )
            )
            asyncio.ensure_future(handler_loop(
                self._async_rotate_queues_wrapper,
                sleep=5
            ))
            asyncio.ensure_future(handler_loop(
                self._stop_on_shutdown_event,
                sleep=1
            ))
            loop = asyncio.get_event_loop()
            loop.run_forever()
            print(f"Cancelling rx workers: {rx_workers}")
            for rx_worker in rx_workers:
                rx_worker.cancel()
            if self.shutdown_callback.is_set():
                break

    def _change_peering_status(self, new_status: PeeringStatus):
        if self.status != new_status:
            self.logger.info(f"Peering status change from {self.status} to "
                             f"{new_status}")
            self.status = new_status

    async def _handle_establish_connection(self) -> None:
        """
        Sends a PeeringHello message to wake up the other side of the
        connection.
        """
        if self.status != PeeringStatus.ESTABLISHED:
            self._change_peering_status(PeeringStatus.ESTABLISHING)
            await self._send_message(PeeringHello(initiator_id=self.id))

    async def _handle_tx_queue_messages(self) -> None:
        """
        If state == ESTABLISHED, this will pull messages from the tx queue pipe
        and send them to the other peer.
        """
        if self.status == PeeringStatus.ESTABLISHED:
            if self.tx_output.poll():
                message = self.tx_output.recv()
                await self._send_message(message)

    async def _handle_timeout(self) -> None:
        if time.time() > self.timeout:
            if self.status != PeeringStatus.ESTABLISHING:
                self._change_peering_status(PeeringStatus.IDLE)

    async def _stop_on_shutdown_event(self) -> None:
        loop = asyncio.get_event_loop()
        if self.shutdown_callback.is_set():
            loop.stop()

    async def _async_rotate_queues_wrapper(self) -> None:
        """
        If _establish_and_rotate_queues() returns True, this stops the event
        loop so the rx worker tasks are cancelled and re-created instead of
        waiting for messages on retired queues.
        """
        loop = asyncio.get_event_loop()
        result = self._establish_and_rotate_queues()
        if result:
            self.logger.info("Event loop reset")
            loop.stop()

    def _establish_and_rotate_queues(self) -> bool:
        """
        Handles rotating the sliding queue_id window.
        :return:
            True on window change
            False on no change
        """
        previous_queue_id = self._get_queue_id(time_delta=timedelta(hours=-1))
        current_queue_id = self._get_queue_id()
        next_queue_id = self._get_queue_id(time_delta=timedelta(hours=1))
        current_list = [previous_queue_id, current_queue_id, next_queue_id]
        if not self.queues:
            self.queues = current_list
            self.logger.info(f"Initial queue window: {self.queues}")
            return True
        elif self.queues != current_list:
            self.logger.info(f"Window slide occurred. Old queues: {self.queues} "
                             f"New queues: {current_list}")
            self.queues = current_list
            return True
        else:
            return False
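
    # Illustration of the sliding window above (hour labels are hypothetical):
    #   during hour 09 -> [id(08), id(09), id(10)]
    #   during hour 10 -> [id(09), id(10), id(11)]   <- window slide, loop reset
    # where id(H) is the value returned by _get_queue_id() for hour H.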

    async def _handle_messages_previous_window(self) -> None:
        await self._handle_incoming_messages(self.queues[0])

    async def _handle_messages_next_window(self) -> None:
        await self._handle_incoming_messages(self.queues[2])

    async def _handle_messages_current_window(self) -> None:
        await self._handle_incoming_messages(self.queues[1])

    def _get_queue_id(self, time_delta=None):
        if time_delta:
            queue_time = datetime.utcnow() + time_delta
        else:
            queue_time = datetime.utcnow()
        return generate_queue_id(
            self.id,
            self.peer_id,
            preshared_key=self.pre_shared_key,
            datetime=queue_time
        )

    async def _handle_incoming_messages(self, queue_id: str) -> None:
        """
        Handles messages received from the other peer.
        Some messages are needed by the class to establish a connection or keep
        it alive. All others are output over the rx pipe so they can be passed
        to other processes.
        :param queue_id:
            ID of the pubsub queue to handle messages on.
        """
        async for rx_message in self._get_message(queue_id):
            if type(rx_message) is PeeringHello:
                await self._send_message(
                    PeeringHelloResponse(responder_id=self.id)
                )
            elif type(rx_message) is PeeringHelloResponse:
                self.peer_id = rx_message.responder_id
                self._change_peering_status(PeeringStatus.ESTABLISHED)
            elif type(rx_message) is PeeringKeepalive:
                self.timeout = time.time() + \
                    self.keepalive_timeout_interval
            else:
                self.rx_input.send(rx_message)

    async def _handle_keepalive(self):
        if self.status == PeeringStatus.ESTABLISHED:
            if time.time() > self.keepalive_send_timeout:
                await self._send_message(PeeringKeepalive())
                self.keepalive_send_timeout = time.time() + \
                    self.keepalive_send_interval

    async def _set_our_ipfs_peer_id(self) -> None:
        """This sets self.our_ipfs_peer_id so we can ignore messages we sent"""
        id_info = await self.ipfs.get_id()
        self.our_ipfs_peer_id = id_info.get('ID')
        self.logger.info(f"Set our ipfs peer id to {self.our_ipfs_peer_id}")

    async def _send_message(self, call: IPRPCMessage):
        message = call.serialize_to_json()
        if self.encryption_helper:
            message = self.encryption_helper.\
                sign_and_encrypt_string_to_peer_fingerprint(
                    message,
                    self.peer_id
                )
        await self._send_ipfs(message)
        self.logger.info(f"Sent message: {call}")

    async def _send_ipfs(self, message: str) -> None:
        await self.ipfs.send_pubsub_message(self.queues[1], message)

    async def _get_from_ipfs(self, queue_id: str):
        if not self.our_ipfs_peer_id:
            await self._set_our_ipfs_peer_id()
        async for message in self.ipfs.get_pubsub_message(queue_id):
            if not message['from'].decode() == self.our_ipfs_peer_id:
                raw_message = unquote(message['data'].decode('utf-8'))
                yield raw_message

    async def _get_message(self, queue_id: str):
        async for message in self._get_from_ipfs(queue_id):
            if self.encryption_helper:
                try:
                    message = self._decrypt_message(message)
                except Exception as e:
                    self.logger.warning(f"Failed to decrypt message on"
                                        f" encrypted channel: {e}")
            try:
                message = IPRPCRegistry.deserialize_from_json(message)
                self.logger.info(f"Got message from peer: {message}")
                yield message
            except Exception as e:
                self.logger.warning(f"Failed to decode message: {e}")

    def _decrypt_message(self, message: str):
        return self.encryption_helper.\
            decrypt_and_verify_encrypted_message(message)

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__}:" \
            f"peer_id={self.peer_id}>"
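
# A minimal usage sketch for the channel class above.  Its real class name is
# defined above this excerpt, so Channel below is only a placeholder; the
# keyword arguments mirror the parameters assigned in __init__.
#
#     channel = Channel(id="alice", peer_fingerprint="bob-fingerprint")
#     tx_pipe, rx_pipe = channel.get_pipe_endpoints()
#     channel.start()                       # run() executes in the child process
#     tx_pipe.send(some_iprpc_message)      # any IPRPCMessage; sent once ESTABLISHED
#     if rx_pipe.poll(timeout=1):
#         print(rx_pipe.recv())             # messages forwarded from the peer
#     channel.shutdown_callback.set()       # stops the event loop and ends run()
#     channel.join()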
Exemplo n.º 46
0
class Cluster(object):
    def __init__(self, list_key=Conf.Q_LIST):
        self.sentinel = None
        self.stop_event = None
        self.start_event = None
        self.pid = current_process().pid
        self.host = socket.gethostname()
        self.list_key = list_key
        self.timeout = Conf.TIMEOUT
        signal.signal(signal.SIGTERM, self.sig_handler)
        signal.signal(signal.SIGINT, self.sig_handler)

    def start(self):
        # This is just for PyCharm to not crash. Ignore it.
        if not hasattr(sys.stdin, 'close'):

            def dummy_close():
                pass

            sys.stdin.close = dummy_close
        # Start Sentinel
        self.stop_event = Event()
        self.start_event = Event()
        self.sentinel = Process(target=Sentinel,
                                args=(self.stop_event, self.start_event,
                                      self.list_key, self.timeout))
        self.sentinel.start()
        logger.info(_('Q Cluster-{} starting.').format(self.pid))
        while not self.start_event.is_set():
            sleep(0.1)
        return self.pid

    def stop(self):
        if not self.sentinel.is_alive():
            return False
        logger.info(_('Q Cluster-{} stopping.').format(self.pid))
        self.stop_event.set()
        self.sentinel.join()
        logger.info(_('Q Cluster-{} has stopped.').format(self.pid))
        self.start_event = None
        self.stop_event = None
        return True

    def sig_handler(self, signum, frame):
        logger.debug(
            _('{} got signal {}').format(
                current_process().name,
                Conf.SIGNAL_NAMES.get(signum, 'UNKNOWN')))
        self.stop()

    @property
    def stat(self):
        if self.sentinel:
            return Stat.get(self.pid)
        return Status(self.pid)

    @property
    def is_starting(self):
        return self.stop_event and self.start_event and not self.start_event.is_set()

    @property
    def is_running(self):
        return self.stop_event and self.start_event and self.start_event.is_set()

    @property
    def is_stopping(self):
        return (self.stop_event and self.start_event
                and self.start_event.is_set() and self.stop_event.is_set())

    @property
    def has_stopped(self):
        return self.start_event is None and self.stop_event is None and self.sentinel
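
# A short usage sketch, assuming this class lives in the module where Conf,
# Sentinel, Stat and logger are defined as above: start a cluster, inspect its
# state, then stop it.
if __name__ == "__main__":
    cluster = Cluster()           # uses Conf.Q_LIST by default
    cluster.start()               # blocks until the Sentinel sets start_event
    print(cluster.is_running)     # True once the Sentinel has started
    cluster.stop()                # sets stop_event and joins the Sentinel
    print(cluster.has_stopped)    # truthy once both events have been cleared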
Exemplo n.º 47
0
class SlaPrinterApp(Flask, Observable, Observer, Process):
    def __init__(self, import_name, db_controller = None, bus = None):
        #super(SlaPrinterApp, self).__init__(import_name, template_folder=TEMPLATE_DIR, static_url_path=STATIC_DIR)
        Flask.__init__(self,import_name, static_folder=STATIC_DIR, template_folder=TEMPLATE_DIR)
        Observable.__init__(self, bus)
        Observer.__init__(self, bus)
        Process.__init__(self)
        self.exit = Event()


        self.endpoint_prefix = None
        for name in dir(self):
            if hasattr(getattr(self, name), "_routing_data"):
                fn = getattr(self, name)
                rds = fn._routing_data
                for rd in rds:
                    self.route(*rd.args, **rd.kwargs)(fn)


        self.register_error_handler(404, self.page_not_found)
        self.db_controller = db_controller

        print("Flask Server initializing")

        #func = lambda: self.run(host='0.0.0.0',debug=True, port=4242,  use_evalex=False, use_reloader=False)
        #self.runner_process = Process(target=func)


    @route("/")
    @route("/index")
    def index(self):

        if self.db_controller is not None:
            tasks = self.db_controller.printing_tasks()
            print("tasks: " + str(tasks))
            active_job = self.db_controller.active_job()
            return render_template("index.html", printing_tasks = tasks, active_job= active_job)

        else:
            return render_template("index.html")

    @route("/enqueue")
    def enqueue(self):

        return render_template("enqueue.html")

    @route("/info")
    def info(self):

        return render_template("info.html")

    @route("/quit", methods = ['POST', 'GET'])
    def quit(self):


        msg = QuitMessage(SlaPrinterAppProto(),"sla printer shutting down")
        self.put_message(msg)

        return "quitting"

    @route("/post/raw/", methods = ['POST'])
    def post_data(self):
        '''
        Accepts simple raw string data for the 3D printer.

        :return: a success / failure message
        '''


        # get current data pool to store new objects
        #data_pool = cntrl.DataPool()

        # load data
        data = json.loads(request.data)


        # if "data" in data:
        #     d  = RawData(data["data"])
        #     data_pool.add(d)
        #     return str(d)
        # else:
        #     return "no data received"

        # minimal fallback so the view always returns a response
        if "data" in data:
            return str(data["data"])
        return "no data received"

    @route("/post/task/",  methods = ['POST'])
    def post_task(self):
        '''
        Sends a printing task to the 3D printer.

        :return: a success / failure message
        '''

        # load data
        data = json.loads(request.data)

        # create empty printing task
        task = PrintingTaskData()

        # if the received data parses into a task object, store the new printing task
        if task.parse(data):

            if self.db_controller is not None:
                jid = self.db_controller.save_printing_task(task)
                return json.dumps({"id": jid})
            else:
                return "no database controller available"
        else:
            return "invalid data"


    @route("/download/<jid>/",  methods = ['POST', 'GET'])
    def download_zip(self, jid):

        # data_pool = cntrl.DataPool()
        #
        #
        task = self.db_controller.get_by_id(jid)

        if task is not None:

            zip_data = task.stl_file
            # decodebytes replaces the deprecated base64.decodestring alias
            zip_data = base64.decodebytes(zip_data)
            filename = task.file_name

        else:
            zip_data = ''
            filename = ''


        return Response(zip_data,
                mimetype='application/zip',
                headers={'Content-Disposition':'attachment;filename=' + str(filename) + '.zip'})
                #headers={'Content-Disposition':'attachment;filename=' + str(file_name) + '.zip'})

    @route("/stepper/down/<steps>/",  methods = ['POST', 'GET'])
    def steps_down(self, steps):
        print(" received " + str(steps) + " down message")
        # return a simple acknowledgement so Flask has a response to send
        return "ok"


    def page_not_found(self, e):
        return render_template('404.html'), 404


    def notify(self, msg):
        print("[" + str(now()) + "] Server :: " + str(msg))



    def start(self):
        print("[" + str(now()) + "] Server :: starting ")
        #self.runner_process.start()
        #self.running = True
        Process.start(self)



    def stop(self):
        self.exit.set()
        self.terminate()
        #self.join()

    def run(self):

        # run server
        Flask.run(self, host='0.0.0.0',debug=True, port=4242,  use_evalex=False, use_reloader=False)
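        # note: Flask.run() blocks until the server exits, so the heartbeat
        # loop below only runs while the process is shutting down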


        while not self.exit.is_set():

            print("[" + str(now()) + "] server heartbeat ")
            time.sleep(0.5)


        print("[" + str(now()) + "] server shutting down ")

        return
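
# A hedged usage sketch: construct the app and serve Flask in its own process.
# db_controller and bus keep their None defaults from the signature above.
if __name__ == "__main__":
    server = SlaPrinterApp(__name__)
    server.start()   # run() serves Flask on 0.0.0.0:4242 in the child process
    # ... later, from the parent process:
    server.stop()    # sets the exit event and terminates the child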
Exemplo n.º 48
0
class RestoreVMsWindow(Ui_Restore, QWizard):

    __pyqtSignals__ = ("restore_progress(QString)", "backup_progress(int)")

    def __init__(self, app, qvm_collection, blk_manager, parent=None):
        super(RestoreVMsWindow, self).__init__(parent)

        self.app = app
        self.qvm_collection = qvm_collection
        self.blk_manager = blk_manager

        self.restore_options = None
        self.vms_to_restore = None
        self.func_output = []
        self.feedback_queue = Queue()
        self.canceled = False
        self.tmpdir_to_remove = None
        self.error_detected = Event()

        self.excluded = {}

        self.vm = self.qvm_collection[0]

        assert self.vm is not None

        self.setupUi(self)

        self.select_vms_widget = MultiSelectWidget(self)
        self.select_vms_layout.insertWidget(1, self.select_vms_widget)

        self.connect(self, SIGNAL("currentIdChanged(int)"), self.current_page_changed)
        self.connect(self, SIGNAL("restore_progress(QString)"), self.commit_text_edit.append)
        self.connect(self, SIGNAL("backup_progress(int)"), self.progress_bar.setValue)
        self.dir_line_edit.connect(self.dir_line_edit, SIGNAL("textChanged(QString)"), self.backup_location_changed)
        self.connect(self.verify_only, SIGNAL("stateChanged(int)"),
                     self.on_verify_only_toggled)

        self.select_dir_page.isComplete = self.has_selected_dir
        self.select_vms_page.isComplete = self.has_selected_vms
        self.confirm_page.isComplete = self.all_vms_good
        # FIXME: this causes isComplete() to run twice, I don't know why
        self.select_vms_page.connect(self.select_vms_widget, SIGNAL("selected_changed()"), SIGNAL("completeChanged()"))

        fill_appvms_list(self)
        self.__init_restore_options__()

    @pyqtSlot(name='on_select_path_button_clicked')
    def select_path_button_clicked(self):
        select_path_button_clicked(self, True)

    def on_ignore_missing_toggled(self, checked):
        self.restore_options['use-default-template'] = checked
        self.restore_options['use-default-netvm'] = checked

    def on_ignore_uname_mismatch_toggled(self, checked):
        self.restore_options['ignore-username-mismatch'] = checked

    def on_verify_only_toggled(self, checked):
        self.restore_options['verify-only'] = bool(checked)

    def cleanupPage(self, p_int):
        if self.page(p_int) is self.select_vms_page:
            self.vms_to_restore = None
        else:
            super(RestoreVMsWindow, self).cleanupPage(p_int)

    def __fill_vms_list__(self):
        if self.vms_to_restore is not None:
            return

        self.select_vms_widget.selected_list.clear()
        self.select_vms_widget.available_list.clear()

        self.target_appvm = None
        if self.appvm_combobox.currentIndex() != 0:   #An existing appvm chosen
            self.target_appvm = self.qvm_collection.get_vm_by_name(
                    str(self.appvm_combobox.currentText()))

        try:
            self.vms_to_restore = backup.backup_restore_prepare(
                    unicode(self.dir_line_edit.text()),
                    unicode(self.passphrase_line_edit.text()),
                    options=self.restore_options,
                    host_collection=self.qvm_collection,
                    encrypted=self.encryption_checkbox.isChecked(),
                    appvm=self.target_appvm)

            for vmname in self.vms_to_restore:
                if vmname.startswith('$'):
                    # Internal info
                    continue
                self.select_vms_widget.available_list.addItem(vmname)
        except QubesException as ex:
            QMessageBox.warning (None, "Restore error!", str(ex))

    def __init_restore_options__(self):
        if not self.restore_options:
            self.restore_options = {}
            backup.backup_restore_set_defaults(self.restore_options)

        if 'use-default-template' in self.restore_options and 'use-default-netvm' in self.restore_options:
            val = self.restore_options['use-default-template'] and self.restore_options['use-default-netvm']
            self.ignore_missing.setChecked(val)
        else:
            self.ignore_missing.setChecked(False)

        if 'ignore-username-mismatch' in self.restore_options:
            self.ignore_uname_mismatch.setChecked(self.restore_options['ignore-username-mismatch'])

    def gather_output(self, s):
        self.func_output.append(s)

    def restore_error_output(self, s):
        self.error_detected.set()
        self.feedback_queue.put((SIGNAL("restore_progress(QString)"),
                                 u'<font color="red">{0}</font>'.format(s)))

    def restore_output(self, s):
        self.feedback_queue.put((SIGNAL("restore_progress(QString)"),
                                 u'<font color="black">{0}</font>'.format(s)))

    def update_progress_bar(self, value):
        self.feedback_queue.put((SIGNAL("backup_progress(int)"), value))

    def __do_restore__(self, thread_monitor):
        err_msg = []
        self.qvm_collection.lock_db_for_writing()
        try:
            backup.backup_restore_do(self.vms_to_restore,
                                     self.qvm_collection,
                                     print_callback=self.restore_output,
                                     error_callback=self.restore_error_output,
                                     progress_callback=self.update_progress_bar)
        except backup.BackupCanceledError as ex:
            self.canceled = True
            self.tmpdir_to_remove = ex.tmpdir
            err_msg.append(unicode(ex))
        except Exception as ex:
            print "Exception:", ex
            err_msg.append(unicode(ex))
            err_msg.append("Partially restored files left in "
                           "/var/tmp/restore_*, investigate them and/or clean them up")

        self.qvm_collection.unlock_db()
        if self.canceled:
            self.emit(SIGNAL("restore_progress(QString)"),
                      '<b><font color="red">{0}</font></b>'
                      .format("Restore aborted!"))
        elif len(err_msg) > 0 or self.error_detected.is_set():
            if len(err_msg) > 0:
                thread_monitor.set_error_msg('\n'.join(err_msg))
            self.emit(SIGNAL("restore_progress(QString)"),
                      '<b><font color="red">{0}</font></b>'
                      .format("Finished with errors!"))
        else:
            self.emit(SIGNAL("restore_progress(QString)"),
                      '<font color="green">{0}</font>'
                      .format("Finished successfully!"))

        thread_monitor.set_finished()

    def current_page_changed(self, id):

        old_sigchld_handler = signal.signal(signal.SIGCHLD, signal.SIG_DFL)
        if self.currentPage() is self.select_vms_page:
            self.__fill_vms_list__()

        elif self.currentPage() is self.confirm_page:
            for v in self.excluded:
                self.vms_to_restore[v] = self.excluded[v]
            self.excluded = {}
            for i in range(self.select_vms_widget.available_list.count()):
                vmname =  self.select_vms_widget.available_list.item(i).text()
                self.excluded[str(vmname)] = self.vms_to_restore[str(vmname)]
                del self.vms_to_restore[str(vmname)]

            del self.func_output[:]
            self.vms_to_restore = backup.restore_info_verify(self.vms_to_restore,
                                                             self.qvm_collection)
            backup.backup_restore_print_summary(
                    self.vms_to_restore, print_callback = self.gather_output)
            self.confirm_text_edit.setReadOnly(True)
            self.confirm_text_edit.setFontFamily("Monospace")
            self.confirm_text_edit.setText("\n".join(self.func_output))

            self.confirm_page.emit(SIGNAL("completeChanged()"))

        elif self.currentPage() is self.commit_page:
            self.button(self.FinishButton).setDisabled(True)
            self.showFileDialog.setEnabled(True)
            self.showFileDialog.setChecked(self.showFileDialog.isEnabled()
                                           and str(self.dir_line_edit.text())
                                           .count("media/") > 0)

            self.thread_monitor = ThreadMonitor()
            thread = threading.Thread (target= self.__do_restore__ , args=(self.thread_monitor,))
            thread.daemon = True
            thread.start()

            while not self.thread_monitor.is_finished():
                self.app.processEvents()
                time.sleep (0.1)
                try:
                    for (signal_to_emit,data) in iter(self.feedback_queue.get_nowait,None):
                        self.emit(signal_to_emit,data)
                except Empty:
                    pass

            if not self.thread_monitor.success:
                if self.canceled:
                    if self.tmpdir_to_remove and \
                        QMessageBox.warning(None, "Restore aborted",
                                            "Do you want to remove temporary "
                                            "files from %s?" % self.tmpdir_to_remove,
                                            QMessageBox.Yes, QMessageBox.No) == \
                            QMessageBox.Yes:
                        shutil.rmtree(self.tmpdir_to_remove)
                else:
                    QMessageBox.warning (None, "Backup error!", "ERROR: {1}"
                                      .format(self.vm.name, self.thread_monitor.error_msg))

            if self.showFileDialog.isChecked():
                self.emit(SIGNAL("restore_progress(QString)"),
                          '<b><font color="black">{0}</font></b>'.format(
                              "Please unmount your backup volume and cancel "
                              "the file selection dialog."))
                if self.target_appvm:
                    self.target_appvm.run("QUBESRPC %s dom0" % "qubes"
                                                               ".SelectDirectory")
                else:
                    file_dialog = QFileDialog()
                    file_dialog.setReadOnly(True)
                    file_dialog.getExistingDirectory(
                        self, "Detach backup device",
                        os.path.dirname(unicode(self.dir_line_edit.text())))
            self.progress_bar.setValue(100)
            self.button(self.FinishButton).setEnabled(True)
            self.button(self.CancelButton).setEnabled(False)
            self.showFileDialog.setEnabled(False)

        signal.signal(signal.SIGCHLD, old_sigchld_handler)

    def all_vms_good(self):
        for vminfo in self.vms_to_restore.values():
            if not vminfo.has_key('vm'):
                continue
            if not vminfo['good-to-go']:
                return False
        return True

    def reject(self):
        if self.currentPage() is self.commit_page:
            if backup.backup_cancel():
                self.emit(SIGNAL("restore_progress(QString)"),
                          '<font color="red">{0}</font>'
                          .format("Aborting the operation..."))
                self.button(self.CancelButton).setDisabled(True)
        else:
            self.done(0)

    def has_selected_dir(self):
        backup_location = unicode(self.dir_line_edit.text())
        if not backup_location:
            return False
        if self.appvm_combobox.currentIndex() == 0:
            if os.path.isfile(backup_location) or \
                    os.path.isfile(os.path.join(backup_location, 'qubes.xml')):
                return True
        else:
            return True

        return False

    def has_selected_vms(self):
        return self.select_vms_widget.selected_list.count() > 0

    def backup_location_changed(self, new_dir = None):
        self.select_dir_page.emit(SIGNAL("completeChanged()"))
Exemplo n.º 49
0
class ProxyServer:
    """Extremely basic proxy server that simply forwards all traffic along.
    
    http://luugiathuy.com/2011/03/simple-web-proxy-python/ used as a reference

    Methods:
        start(): Start the proxy server subprocess.

        stop(): Cleanly stop the proxy server subprocess.
    """
    def __init__(self):
        self.e = Event()  # Event signalling used for stopping the subprocess
        self.server = Process(target=self._run, name='Proxy_Server')
        self.e.set()
        self.sock = None
        attempts = 0
        while (attempts < 5):
            if attempts > 0:
                print(
                    f"Attempting to open socket again in 10 seconds. ({attempts+1}/5)"
                )
                sleep(10)
            try:
                # create the socket
                self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

                # associate the socket to a host and port number
                self.sock.bind(('', PROXY_PORT_NUMBER))
                # release the socket immediately
                self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

                # listen for up to 50 connections
                MAX_PENDING_CONNECTIONS = 50
                self.sock.listen(MAX_PENDING_CONNECTIONS)
                break
            except socket.error as e:
                if self.sock:
                    self.sock.close()
                print("Could not open socket: " + str(e))
                attempts += 1

    def start(self):
        """Starts the proxy server."""
        self.server.start()

    def stop(self):
        """Stops the proxy server."""
        self.e.clear()
        requests.get(SERVER_URL, proxies={'http': '127.0.0.1:9003'})
        self.e.set()

    def split_url(self, orig_url):
        """Splits a url into its protocol, host, port and path components."""

        # Default values if not specified in request
        protocol = 'http://'
        port = 80  # default HTTP port when the url does not specify one
        path = ''

        # separate the fragment if necessary
        url = orig_url.decode('utf-8')
        url = url.split('#')
        if len(url) > 1:
            url, fragment = url[0], url[1]
        else:
            url, fragment = url[0], None

        # determine the protocol
        url_pos = url.find('://')
        if url_pos != -1:
            protocol = f"{url[:url_pos]}://"
            url = url[url_pos + 3:]

        # determine the path, if any
        path_pos = url.find('/')
        if path_pos != -1:
            path = url[path_pos:]
            url = url[:path_pos]

        # determine the port, if any
        port_pos = url.find(':')
        if port_pos != -1:
            port = url[port_pos + 1:]
            url = url[:port_pos]

        return protocol, url, int(port), path

    def _run(self):
        while (self.e.is_set()):
            # Handle requests
            conn, client_addr = self.sock.accept()

            handler = Process(target=self._proxy,
                              name='Request_Handler',
                              args=(conn, client_addr))

            # Each request is handled in its own short-lived process
            # (this proxy is only used for testing).
            handler.start()
            conn.close()

        # Close the socket when we're done.
        self.sock.close()

    def _proxy(self, conn, client_addr):
        # read the request
        request = conn.recv(MAX_DATA_RECV)

        # we need to extract the URL from the first line of the request
        all_lines = request.split(bytes('\n', 'utf-8'))
        first_line = all_lines[0]

        # method is [0], url is [1]
        orig_url = first_line.split(bytes(' ', 'utf-8'))[1]

        # Split the url into its components.
        protocol, base_url, port, path = self.split_url(orig_url)

        try:
            # Create a socket to connect to the server
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect((base_url, port))
            s.send(request)

            while True:
                # Receive data from web server
                data = s.recv(MAX_DATA_RECV)
                if len(data) > 0:
                    # send to browser
                    conn.send(data)
                else:
                    break
        except socket.error as e:
            print("Runtime Error: " + str(e))
        finally:
            if s:
                s.close()
            if conn:
                conn.close()
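
# A brief usage sketch, assuming PROXY_PORT_NUMBER, SERVER_URL, MAX_DATA_RECV and
# requests are defined/imported in this module as the class expects.
if __name__ == "__main__":
    proxy = ProxyServer()
    print(proxy.split_url(b'http://example.com:8080/path'))
    # -> ('http://', 'example.com', 8080, '/path')
    proxy.start()
    requests.get(SERVER_URL, proxies={'http': '127.0.0.1:%d' % PROXY_PORT_NUMBER})
    proxy.stop()   # clears the event and sends one last request to unblock accept()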
Exemplo n.º 50
0
class ValkkaProcess(Process):
    """
    Semantics:

    Frontend: the part of the forked process that keeps running in the current (user) virtual memory space
    Backend : the part of the forked process that runs in its own virtual memory space (e.g. "in the background")

    This class has both backend and frontend methods:

    - Backend methods should only be called from the backend.  They are designated with a trailing "_"
      (except for "run()", which always runs in the backend).
    - Frontend methods should only be called from the frontend.

    Frontend methods use a pipe to send a signal to the backend, which then handles the signal with a
    backend method of the same name (ending in "_").

    Backend methods can, in a similar fashion, send signals to the frontend using a pipe.  The frontend
    needs a listening thread that calls handleSignal, which dispatches to the correct frontend method.
    A usage sketch follows the class.

    TODO: add the possibility to bind the process to a certain processor
    """

    # incoming signals : from frontend to backend
    incoming_signal_defs = {  # each key corresponds to a frontend and a backend method
        "test_": {
            "test_int": int,
            "test_str": str
        },
        "stop_": []
    }

    # outgoing signals : from backend to frontend.  Don't use the same names as
    # for incoming signals.
    outgoing_signal_defs = {
        "test_o": {
            "test_int": int,
            "test_str": str
        },
    }

    def __init__(self, name, affinity=-1, **kwargs):
        super().__init__()
        self.pre = self.__class__.__name__ + " : " + name + \
            " : "  # auxiliary string for debugging output
        self.name = name
        self.affinity = affinity
        self.signal_in = Event()
        self.signal_out = Event()
        # communications pipe.  Frontend uses self.pipe, backend self.childpipe
        self.pipe, self.childpipe = Pipe()

        self.signal_in.clear()
        self.signal_out.clear()

        # print(self.pre, "init")

    def getPipe(self):
        """Returns communication pipe for front-end
        """
        return self.pipe

    def preRun_(self):
        """After the fork, but before starting the process loop
        """
        if (self.affinity > -1):
            os.system("taskset -p -c %d %d" % (self.affinity, os.getpid()))

    def postRun_(self):
        """Just before process exit
        """
        print(self.pre, "post: bye!")

    def cycle_(self):
        # Do whatever your process should be doing, remember timeout every now
        # and then
        time.sleep(5)
        print(self.pre, "hello!")

    def startAsThread(self):
        from threading import Thread
        t = Thread(target=self.run)
        t.start()

    def run(self):  # No "_" in the name, but nevertheless, running in the backend
        """After the fork. Now the process starts running
        """
        # print(self.pre," ==> run")

        self.preRun_()
        self.running = True

        while (self.running):
            self.cycle_()
            self.handleSignal_()

        self.postRun_()

    def handleSignal_(self):
        """Signals handling in the backend
        """
        if (self.signal_in.is_set()):
            signal_dic = self.childpipe.recv()
            method_name = signal_dic.pop("name")
            method = getattr(self, method_name)
            method(**signal_dic)
            self.signal_in.clear()
            self.signal_out.set()

    def sendSignal(self, **kwargs):  # sendSignal(name="test",test_int=1,test_str="kokkelis")
        """Incoming signals: this is used by frontend methods to send signals to the backend
        """
        try:
            name = kwargs.pop("name")
        except KeyError:
            raise (AttributeError("Signal name missing"))

        # a dictionary: {"parameter_name" : parameter_type}
        model = self.incoming_signal_defs[name]

        for key in kwargs:
            # raises error if user is using undefined signal
            model_type = model[key]
            parameter_type = kwargs[key].__class__
            if (model_type == parameter_type):
                pass
            else:
                raise (AttributeError("Wrong type for parameter " + str(key)))

        kwargs["name"] = name

        self.pipe.send(kwargs)
        self.signal_out.clear()
        self.signal_in.set()  # indicate that there is a signal
        self.signal_out.wait()  # wait for the backend to clear the signal

    def handleSignal(self, signal_dic):
        """Signal handling in the frontend
        """
        method_name = signal_dic.pop("name")
        method = getattr(self, method_name)
        method(**signal_dic)

    def sendSignal_(self, **kwargs):  # sendSignal_(name="test_out",..)
        """Outgoing signals: signals from backend to frontend
        """
        try:
            name = kwargs.pop("name")
        except KeyError:
            raise (AttributeError("Signal name missing"))

        # a dictionary: {"parameter_name" : parameter_type}
        model = self.outgoing_signal_defs[name]

        for key in kwargs:
            # raises error if user is using undefined signal
            try:
                model_type = model[key]
            except KeyError:
                print("your outgoing_signal_defs for", name, "is:", model)
                print("you requested key:", key)
                raise
            parameter_type = kwargs[key].__class__
            if (model_type == parameter_type):
                pass
            else:
                raise (AttributeError("Wrong type for parameter " + str(key)))

        kwargs["name"] = name

        self.childpipe.send(kwargs)

    # *** backend methods corresponding to each incoming signals ***

    def stop_(self):
        self.running = False

    def test_(self, test_int=0, test_str="nada"):
        print(self.pre, "test_ signal received with", test_int, test_str)

    # ** frontend methods corresponding to each incoming signal: these communicate with the backend via pipes **

    def stop(self):
        self.sendSignal(name="stop_")

    def test(self, **kwargs):
        dictionaryCheck(self.incoming_signal_defs["test_"], kwargs)
        kwargs["name"] = "test_"
        self.sendSignal(**kwargs)

    # ** frontend methods corresponding to each outgoing signal **

    # typically, there is a QThread in the frontend-side reading the process pipe
    # the QThread reads kwargs dictionary from the pipe, say
    # {"name":"test_o", "test_str":"eka", "test_int":1}
    # And calls handleSignal(kwargs)

    def test_o(self, **kwargs):
        pass
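
# A compact usage sketch of the frontend/backend signalling described in the
# class docstring (assuming dictionaryCheck is defined in this module as the
# class expects):
if __name__ == "__main__":
    p = ValkkaProcess("example")
    p.start()                                # backend: preRun_ + cycle_ loop
    p.test(test_int=1, test_str="kokkelis")  # frontend -> backend via pipe
    p.stop()                                 # sends "stop_", ends run()
    p.join()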
Exemplo n.º 51
0
class DataProcessingMultiprocessing(DataProcessing):
    def __init__(self, options, log=None):
        DataProcessing.__init__(self, options)
        self.log = log
        self.exit = Event()

        utils.startup(self.options)
        # First queue - Send the jobs
        self.tasks = multiprocessing.JoinableQueue(maxsize=10000)
        # Second one - Send the number of images to process
        self.N_queue = multiprocessing.Queue()
        # Third one - the results
        self.results = multiprocessing.Queue()

        # Process to look after the files
        self.FS = FileSentinel(self.tasks, self.N_queue, self.options)

        # Real workers...
        if 'h5' in self.options['file_extension'].lower():
            from .MultiProcess import MProcessEiger as MProcess
        else:
            from .MultiProcess import MProcess
        self.consumers = [
            MProcess(self.tasks,
                     self.results,
                     self.options,
                     self.ai,
                     self.detector,
                     name=str(i)) for i in range(self.options['cpus'])
        ]
        # Some stats
        self.statsManager = StatsManager(self.options, self.results)

    def run(self):
        self.startMP()
        self.startFS()

        i = 0
        if self.options["live"]:
            try:
                while not self.exit.is_set():
                    self.getStats()
                    i += 1
            except KeyboardInterrupt:
                print(
                    "\n\nCtrl-c received! --- Aborting and trying not to compromise results..."
                )

        else:
            while (self.statsManager.processed != self.statsManager.total
                   or self.statsManager.processed == 0) and not self.exit.is_set():
                try:
                    self.getStats()
                except KeyboardInterrupt:
                    print(
                        "\n\nCtrl-c received --- Aborting and trying not to compromise results..."
                    )
                    break

        self.statsManager.getFinalResults()

    def shutDown(self):
        self.exit.set()

    def getStats(self):
        while True:
            try:
                self.statsManager.total = self.N_queue.get(block=False,
                                                           timeout=None)
                self.statsManager.chunk = max(
                    int(self.statsManager.total / 1000.), 20)
            except Queue.Empty:
                pass

            try:
                self.statsManager.getResults()
            except Queue.Empty:
                break

    def startFS(self):
        self.FS.daemon = True
        self.FS.start()

    def startMP(self):
        """Start as many worker processes as configured by the user."""
        for w in self.consumers:
            w.start()
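
# A heavily hedged usage sketch.  The real options dictionary must contain
# whatever DataProcessing, utils.startup() and the MProcess workers expect;
# only the keys read in this excerpt ('file_extension', 'cpus', 'live') are
# shown, with placeholder values.
if __name__ == "__main__":
    options = {'file_extension': 'cbf', 'cpus': 4, 'live': False}
    dp = DataProcessingMultiprocessing(options)
    try:
        dp.run()       # starts the workers and the FileSentinel, then polls stats
    finally:
        dp.shutDown()  # sets the exit event so run() can return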
Exemplo n.º 52
0
class SerialPort(Process):
    """
    A multiprocessing based wrapper for an instance of pyserial Serial.
    A RobotInterface class manages all instances of this class.
    This class is for internal use only
    """

    port_updates_per_second = 1000

    def __init__(self, port_address):
        """

        :param port_address: address of the serial port to wrap (e.g. "/dev/ttyUSB0").
            The packet queue, lock, packet counter and update clock are created
            internally rather than passed in.
        """

        self.address = port_address

        self.packet_queue = Queue()
        self.counter = Value('i', 0)
        self.lock = Lock()
        self.queue_len = 0

        # status variables
        self.configured = True
        self.abides_protocols = True
        self.port_assigned = False

        self.message_lock = Lock()
        self.error_message = Queue()

        self.print_out_lock = Lock()
        self.debug_print_outs = Queue()

        # time variables
        self.start_time = 0.0
        self.loop_time = 0.0

        # whoiam ID info
        self.whoiam = None  # ID tag of the microcontroller
        self.whoiam_header = "iam"  # whoiam packets start with "iam"
        self.whoiam_ask = "whoareyou"

        # first packet info
        self.first_packet = None
        self.first_packet_ask = "init?"
        self.first_packet_header = "init:"

        self.stop_packet_ask = "stop"
        self.stop_packet_header = "stopping"

        self.protocol_timeout = 3  # seconds
        self.protocol_packets = [
            self.whoiam_header, self.first_packet_header,
            self.stop_packet_header
        ]

        # misc. serial protocol
        self.packet_end = "\n"  # what this microcontroller's packets end with
        self.default_rate = 115200
        self.baud_rate = Value('i', self.default_rate)

        # buffer for putting packets into
        self.buffer = ""
        self.prev_read_packets = []
        self.prev_write_packet = ""

        # leaves tabs, letters, numbers, spaces, newlines, and carriage returns
        self.buffer_pattern = re.compile("([^\r\n\t\x20-\x7e]|_)+")

        # events and locks
        self.exit_event_lock = Lock()
        self.start_event_lock = Lock()

        self.exit_event = Event()
        self.start_event = Event()
        self.stop_event = Event()

        self.serial_lock = Lock()

        self.serial_ref = None

        super(SerialPort, self).__init__(target=self.update)

    # ----- initialization methods -----

    def initialize(self):
        # attempt to open the serial port
        try:
            self.serial_ref = serial.Serial(port=self.address,
                                            baudrate=self.baud_rate.value)
        except SerialException as error:
            self.handle_error(error, traceback.format_stack())
            self.configured = False

        time.sleep(2)  # wait for microcontroller to wake up

        if self.configured:
            # Find the ID of this port. The ports will be matched up to the correct RobotObject later
            self.find_whoiam()
            if self.whoiam is not None:
                self.find_first_packet()
            else:
                self.debug_print(
                    "whoiam ID was None, skipping find_first_packet")
        else:
            self.debug_print("Port not configured. Skipping find_whoiam")

    def send_start(self):
        """
        Send the start flag
        For external use
        :return: None
        """
        self.debug_print("sending start")
        return self.write_packet("start")

    def find_whoiam(self):
        """
        Get the whoiam packet from the microcontroller. This method waits up to
        self.protocol_timeout seconds for the packet to arrive.

        example:
            sent: "whoareyou\n"
            received: "iamlidar\n"

        When the packet is found, self.whoiam is assigned.

        For initialization
        :return: None; self.whoiam holds the ID on success
        """

        self.whoiam = self.check_protocol(self.whoiam_ask, self.whoiam_header)

        if self.whoiam is not None:
            self.debug_print("%s has ID '%s'" % (self.address, self.whoiam))
        else:
            # self.configured = False
            self.abides_protocols = False
            self.debug_print("Failed to obtain whoiam ID!", ignore_flag=True)

    def find_first_packet(self):
        """
        Get the first packet from the microcontroller. This method waits up to
        self.protocol_timeout seconds for the packet to arrive.

        example:
            sent: "init?\n"
            received: "init:\n" (if nothing to init, initialization methods not called)
            received: "init:something interesting\t01\t23\n"
                'something interesting\t01\t23' would be the first packet

        When the packet is found, self.first_packet is assigned.

        For initialization
        :return: None; self.first_packet holds the data on success
        """
        self.first_packet = self.check_protocol(self.first_packet_ask,
                                                self.first_packet_header)

        if self.first_packet is not None:
            self.debug_print("sent initialization data: %s" %
                             repr(self.first_packet))
        else:
            # self.configured = False
            self.abides_protocols = False
            self.debug_print("Failed to obtain first packet!",
                             ignore_flag=True)

    def check_protocol(self, ask_packet, recv_packet_header):
        """
        A call and response method. After an "ask packet" is sent, the process waits up to
        self.protocol_timeout seconds for a packet with the expected header.

        For initialization

        :param ask_packet: packet to send
        :param recv_packet_header: what the received packet should start with
        :return: the packet received without the header and packet end, or None on failure
        """
        self.debug_print("Checking '%s' protocol" % ask_packet)

        if not self.write_packet(ask_packet):
            return None  # return None if write failed

        start_time = time.time()
        abides_protocol = False
        answer_packet = ""
        attempts = 0
        rounded_time = 0

        # wait for the correct response
        while not abides_protocol:
            in_waiting = self.in_waiting()
            if in_waiting > 0:
                packets = self.read_packets(in_waiting)

                # return None if read failed (read_packets already
                # recorded the error via handle_error)
                if packets is None:
                    return None
                self.print_packets(packets)

                # parse received packets
                for packet in packets:
                    if len(packet) == 0:
                        self.debug_print("Empty packet! Contained only \\n")
                        continue
                    if packet[0:len(
                            recv_packet_header
                    )] == recv_packet_header:  # if the packet starts with the header,
                        self.debug_print("received packet: " + repr(packet))

                        answer_packet = packet[len(
                            recv_packet_header):]  # record it and return it

                        abides_protocol = True

            prev_rounded_time = rounded_time
            rounded_time = int((time.time() - start_time) * 10)
            if rounded_time > 5 and rounded_time % 3 == 0 and prev_rounded_time != rounded_time:
                attempts += 1
                self.debug_print("Writing '%s' again" % ask_packet)
                if not self.write_packet(ask_packet):
                    return None

            # return None if operation timed out
            if (time.time() - start_time) > self.protocol_timeout:
                self.handle_error(
                    "Didn't receive response for packet '%s'. Operation timed out."
                    % ask_packet, traceback.format_stack())
                return None

        return answer_packet  # when the while loop exits, abides_protocol must be True

    def handle_error(self, error, stack_trace):
        """
        When errors occur in a RobotSerialPort, the process doesn't crash. The error is recorded,
        self.update is stopped, and the main process is notified so all other ports can close safely

        For initialization and process use

        :param error: The error message or exception to record
        :param stack_trace: result of traceback.format_stack() at the call site
        :return: None
        """
        with self.exit_event_lock:
            self.exit_event.set()

        # if self.error_message.empty():
        full_message = ""
        for line in stack_trace:
            full_message += str(line)

        if type(error) == str:
            full_message += error
        else:
            full_message += "%s: %s\n" % (error.__class__.__name__, str(error))

        full_message += "Previous read: %s, write: %s" % (
            self.prev_read_packets, self.prev_write_packet)

        with self.message_lock:
            # queue will always be size of one. Easiest way to share strings and avoid race conditions.
            # (sometimes the error message would have arrived incomplete because
            # it gets printed before it gets formed...)
            self.error_message.put(full_message)

    # ----- run methods -----

    def update(self):
        """
        Called when RobotSerialPort.start is called

        :return: None
        """

        self.start_time = time.time()
        clock = Clock(SerialPort.port_updates_per_second)
        clock.start(self.start_time)

        # with self.start_event_lock:
        #     self.start_event.set()

        time.sleep(0.01)

        with self.baud_rate.get_lock():
            if self.baud_rate.value != self.default_rate:  # if changed externally
                self.serial_ref.baudrate = self.baud_rate.value
                self.debug_print("Baud is now", self.serial_ref.baudrate)
            else:
                self.debug_print("Baud rate unchanged")

        with self.start_event_lock:
            self.start_event.set()

        try:
            while True:
                with self.exit_event_lock:
                    if self.exit_event.is_set():
                        break

                # close the process if the serial port isn't open
                with self.serial_lock:
                    if not self.serial_ref.isOpen():
                        self.stop()
                        raise RobotSerialPortClosedPrematurelyError(
                            "Serial port isn't open for some reason...", self)

                    in_waiting = self.in_waiting()
                    if in_waiting is None:
                        self.stop()
                        raise RobotSerialPortClosedPrematurelyError(
                            "Failed to check serial. Is there a loose connection?",
                            self)
                    elif in_waiting > 0:
                        # read every possible character available and split them into packets
                        packets = self.read_packets(in_waiting)
                        if packets is None:  # if the read failed
                            self.stop()
                            raise RobotSerialPortReadPacketError(
                                "Failed to read packets", self)

                        # put data found into the queue
                        with self.lock:
                            for packet in packets:
                                put_on_queue = True
                                for header in self.protocol_packets:
                                    if len(packet) >= len(
                                            header
                                    ) and packet[:len(header)] == header:
                                        if header == self.stop_packet_header:
                                            self.stop()
                                            raise RobotSerialPortClosedPrematurelyError(
                                                "Port signalled to exit (stop flag was found)",
                                                self)
                                        else:
                                            self.debug_print(
                                                "Misplaced protocol packet:",
                                                repr(packet))
                                        put_on_queue = False
                                if put_on_queue:
                                    self.packet_queue.put(
                                        (time.time(), packet))
                                    # start_time isn't used. The main process has its own initial time reference

                            self.counter.value += len(packets)

                clock.update()  # maintain a constant loop speed
        except KeyboardInterrupt:
            self.debug_print("KeyboardInterrupt in port loop")

        self.debug_print("Current buffer:", repr(self.buffer))
        self.debug_print("While loop exited. Exit event triggered.")

        if not self.send_stop_events():
            self.handle_error("Stop flag failed to send!",
                              traceback.format_stack())

    def in_waiting(self):
        try:
            return self.serial_ref.inWaiting()
        except OSError as error:
            self.debug_print(
                "Failed to check serial. Is there a loose connection?")
            self.handle_error(error, traceback.format_stack())
            return None

    def read_packets(self, in_waiting):
        """
        Read all available data on serial and split them into packets as
        indicated by packet_end.

        For initialization and process use

        :return: None indicates the serial read failed and that the communicator thread should be stopped.
            returns the received packets otherwise
        """
        try:
            # read every available character
            if self.serial_ref.isOpen():
                incoming = self.serial_ref.read(in_waiting)
            else:
                self.handle_error("Serial port wasn't open for reading...",
                                  traceback.format_stack())
                return None

        except BaseException as error:
            self.handle_error(error, traceback.format_stack())
            return None

        if len(incoming) > 0:
            # append to the buffer
            try:
                self.buffer += incoming.decode("utf-8", "ignore")
            except UnicodeDecodeError as error:
                self.handle_error(error, traceback.format_stack())
                return None

            buf = self.buffer_pattern.sub('', self.buffer)
            if len(self.buffer) != len(buf):
                self.debug_print("Invalid characters found:",
                                 repr(self.buffer))
            self.buffer = buf

            if len(self.buffer) > len(self.packet_end):
                # split based on user defined packet end
                packets = self.buffer.split(self.packet_end)
                self.prev_read_packets = packets

                # reset the buffer
                self.buffer = packets.pop(-1)

                return packets
        return []

    def write_packet(self, packet):
        """
        Safely write a packet over serial. Automatically appends packet_end to the input.

        For initialization and process use

        :param packet: an arbitrary string without packet_end in it
        :return: True or False if the write was successful
        """
        self.prev_write_packet = str(packet)
        try:
            data = bytearray(str(packet) + self.packet_end, 'ascii')
        except TypeError as error:
            self.handle_error(error, traceback.format_stack())
            return False

        try:
            if self.serial_ref.isOpen():
                with self.serial_lock:
                    self.serial_ref.write(data)
            else:
                self.handle_error("Serial port wasn't open for writing...",
                                  traceback.format_stack())
                return False
        except BaseException as error:
            self.handle_error(error, traceback.format_stack())
            return False

        return True

    def print_packets(self, packets):
        """
        If debug_prints is True, print repr of all incoming packets

        :param packets: a list of received packets
        :return: None
        """
        for packet in packets:
            self.debug_print("> %s" % repr(packet))

    def flush(self):
        self.debug_print("Flushing serial")
        self.serial_ref.reset_input_buffer()
        self.serial_ref.reset_output_buffer()
        self.debug_print("Serial content:", self.in_waiting())

    # ----- external and status methods -----

    def debug_print(self, *strings, ignore_flag=False):
        """
        For initialization and process use

        :param strings:
        :param ignore_flag:
        :return:
        """
        string = "[%s] %s" % (self.whoiam, " ".join(map(str, strings)))
        with self.print_out_lock:
            self.debug_print_outs.put(string)

    def change_rate(self, new_baud_rate):
        """
        For external use

        :param new_baud_rate:
        :return:
        """
        self.debug_print("Setting baud to", new_baud_rate)
        with self.baud_rate.get_lock():
            self.baud_rate.value = new_baud_rate
        self.debug_print("Set baud to", self.baud_rate.value)

    def is_running(self):
        """
        Check if the port's thread is running correctly

        For external use

        :return:
            -1: the exit event was set (an error was likely thrown)
            0: self.configured is False
            1: process hasn't started or everything is fine
        """
        with self.start_event_lock:
            if not self.start_event.is_set():  # process hasn't started
                return 1

        if not self.configured:
            return 0

        with self.exit_event_lock:
            if self.exit_event.is_set():
                return -1

        return 1

    def send_stop_events(self):
        """

        For process use

        :return:
        """

        # with self.exit_event_lock:
        #     if self.exit_event.is_set():
        #         self.debug_print("Exit event already set. Stop was sent internally")
        #         return True

        if self.start_time > 0 and time.time() - self.start_time <= 2:
            # wait for the Arduino to start listening
            time.sleep(2)

        if not self.stop_event.is_set():
            if self.check_protocol(self.stop_packet_ask,
                                   self.stop_packet_header) is None:
                self.debug_print("Failed to send stop flag!!!")
                return False
            else:
                self.debug_print("Sent stop flag")
            self.stop_event.set()

            self.debug_print("Acquiring start lock")
            with self.start_event_lock:
                if not self.start_event.is_set():
                    self.debug_print("start_event not set! Closing serial")
                    self.close_serial()
            self.debug_print("Releasing start lock")
        else:
            self.debug_print("Stop event already set!")

        return True

    def close_serial(self):
        """

        For external use

        :return:
        """
        self.debug_print("Acquiring serial lock")
        with self.serial_lock:
            if self.configured:
                if self.serial_ref.isOpen():
                    self.debug_print("Closing serial")
                    self.serial_ref.close()  # pyserial Serial objects expose close(), not stop()
                else:
                    self.debug_print("Serial port was already closed!")
            else:
                self.debug_print("Port wasn't configured!!")
        self.debug_print("Releasing serial lock")

    def stop(self):
        """
        Send stop packet, close the serial port.

        For external use
        :return: None
        """

        self.debug_print("Acquiring exit lock")
        if not self.exit_event.is_set():
            self.exit_event.set()
        else:
            self.debug_print("Exit event already set! Error was likely thrown")
        self.debug_print("Releasing exit lock")

    def has_exited(self):
        """

        For external use

        :return:
        """
        return self.exit_event.is_set()

    def __str__(self):
        return "%s(port_address=%s)" % (self.__class__.__name__, self.address)

    def __repr__(self):
        return self.__str__()
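
# --- hedged illustrative sketch (not part of the original example) ---
# The buffering/framing approach used by read_packets() above, reduced to a
# standalone helper: decoded bytes accumulate in a string buffer, characters
# outside a valid set are stripped, and the buffer is split on a packet
# terminator while the trailing (possibly incomplete) chunk is kept for the next
# read. The class name, the '\n' terminator, and the validity regex are
# assumptions for illustration only.
import re


class PacketFramer:
    def __init__(self, packet_end="\n"):
        self.packet_end = packet_end
        self.buffer = ""
        self.invalid_chars = re.compile(r"[^\x20-\x7e\n]")  # keep printable ASCII + newline

    def feed(self, incoming: bytes):
        """Append raw serial bytes and return any complete packets found."""
        self.buffer += incoming.decode("utf-8", "ignore")
        self.buffer = self.invalid_chars.sub("", self.buffer)
        if len(self.buffer) <= len(self.packet_end):
            return []
        packets = self.buffer.split(self.packet_end)
        self.buffer = packets.pop(-1)  # keep the unterminated remainder for next time
        return packets


if __name__ == "__main__":
    framer = PacketFramer()
    print(framer.feed(b"hello\nwor"))   # ['hello']
    print(framer.feed(b"ld\npartial"))  # ['world']
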
Exemplo n.º 53
0
class ProcessServer(threading.Thread):
    '''
    This type manages the creation of subprocesses and the despatch of computational tasks to them. Typically the global
    instance is created at startup through createGlobalServer() at which point the subprocesses are created. Tasks are
    enqueued to be executed through callProcessFunc() or indirectly if a routine decorated with @concurrent is called.
    
    '''

    globalServer=None # global instance of the server

    @staticmethod
    def createGlobalServer(realnumprocs=cpu_count()):
        '''Creates the global instance of the server, `realnumprocs' being the number of processes to create.'''
        ProcessServer.globalServer=ProcessServer(realnumprocs)
        ProcessServer.globalServer.start()

    def __init__(self,realnumprocs=cpu_count()):
        threading.Thread.__init__(self)
        self.daemon=True

        self.realnumprocs=clamp(realnumprocs,1,cpu_count())
        self.procs=[]
        self.sharer=ObjectSharer()
        self.syncEvent=Event()
        self.syncEvent2=Event()
        self.syncCounter=Value('i',0)
        self.syncLock=Lock()
        self.jobqueue=queue.Queue()
        self.progress=Array('l',self.realnumprocs)
        self.objsrv=ObjectServer()
        self.stopEvent=Event()

    def callProcessFunc(self,valrange,numprocs,task,target,*args,**kwargs):
        '''
        Sends the request to execute `target' in parallel with the given `args' and `kwargs' argument values. The 
        `valrange' value is used to set the value range members of the AlgorithmProcess objects, `numprocs' is the 
        number of processes to execute `target' on (1 will cause `target' to be executed in the main process, a value 
        <=0 will mean all the processes will be used), and `task' is the (possibly None) Task object to report 
        progress to. The return value is a Future object which will contain the results or exceptions thrown.
        
        The `target' routine must exist in a module scope at runtime so inner routines or dynamically created routines
        cannot be used here. When called, the first argument will be the AlgorithmProcess object followed by those in
        `args' and `kwargs'. The AlgorithmProcess instance will be different for each process `target' is called in,
        its members will state which process and the subset of `valrange' assigned to it. The expectation is that
        `valrange' is the number of elements `target' is used to operate on, when called each process is assigned a 
        subset of the range [0,`valrange') and the internal algorithm of `target' is expected to iterate over those 
        values only. 
        
        The optional argument "partitionArgs" in `kwargs' may contain a tuple containing the members in `args' which
        are iterable and which should be partitioned amongst the processes. Values in `args' and `kwargs' are normally
        copied to each process, so this allows large iterables to be partitioned and not needlessly duplicated.
        
        The @concurrent decorator can be used to wrap the invocation of callProcessFunc() within the function
        definition itself. The first argument of the routine must still be the AlgorithmProcess object, but when called
        three arguments representing `valrange', `numprocs', and `task' must be provided first instead. The routine
        will then block until the processing is complete and return the results instead of a Future object.
        
        Example:
            def testfunc(process):
                return (process.index,int(process.startval),int(process.endval))
            
            result=ProcessServer.globalServer.callProcessFunc(50,4,None,testfunc)
            printFlush(listResults(result()))
        
        Output: [(0, 0, 12), (1, 12, 25), (2, 25, 37), (3, 37, 50)]
        
        Example:
            @concurrent 
            def testfunc(process,values):
                return (process.index,values)
            
            values=range(20)
            result=testfunc(len(values),3,None,values,partitionArgs=(values,))
            printFlush(listResults(result))
            
        Output: [(0, [0, 1, 2]), (1, [3, 4, 5]), (2, [6, 7, 8, 9])]
        '''
        result=Future()
        self.jobqueue.put((valrange,numprocs,task,target,args,kwargs,result))
        return result

    def prepareArgs(self,index,numprocs,args,partArgs):
        '''Prepare arguments by dividing lists/tuples present in both `args' and `partArgs' into the slice for proc `index'.'''
        pargs=[]
        for a in args: # construct an argument list `pargs' to be passed to the process object
            if a in partArgs:
                astart,aend=partitionSequence(len(a),index,numprocs)
                pargs.append(a[astart:aend]) # replace `a' with a per-process slice of `a'
            else:
                pargs.append(a) # use `a' directly

        return pargs

    def run(self):
        '''Run the server, reading messages from the job queue and sending them to the work processes.'''
        atexit.register(self.stop)

        # do not create processes if the number of procs is 1, this forces single process mode
        if self.realnumprocs>1:
            # start all the processes
            for i in range(self.realnumprocs):
                psharer=self.objsrv.getProxy(self.sharer)
                p=AlgorithmProcess(i,self.realnumprocs,self.syncEvent,self.syncEvent2,self.syncCounter,self.syncLock,psharer,self.progress,self.stopEvent,os.getpid())
                p.start()
                self.procs.append(p)

        # continually read items from `self.jobqueue' and send the execution requests to the processes
        while not self.stopEvent.is_set():
            valrange,numprocs,task,target,args,kwargs,result=self.jobqueue.get(True) # get message
            
            partArgs=kwargs.pop('partitionArgs',()) # get list of objects to partition between processes
            numprocs=min(valrange,self.realnumprocs if numprocs<=0 or numprocs>self.realnumprocs else numprocs) # get number of processes to use,

            self.sharer.clear()
            
            for i in range(self.realnumprocs): # reset the progress counting shared array
                self.progress[i]=0

            if task: # set the task's progress value
                task.setMaxProgress(valrange)

            with result:
                if numprocs==1 or not self.procs: # if only one process is to be used, execute the target locally instead of in a worker process
                    try:
                        # construct a local process; passing None for the sync parameters tells it not to use concurrency features like syncing
                        localproc=AlgorithmProcess(0,1,None,None,None,None,None,task,None,0)
                        localproc.endval=valrange
                        localproc.maxval=valrange
                                
                        tresult=target(localproc,*args,**kwargs)
                        result.setObject({0:tresult})
                    except Exception as e:
                        printFlush('LOCALPROC',e)
                        traceback.print_exc()
                        result.setObject({0:e})

                else: # otherwise use work processes
                    self.syncCounter.value=0 # reset the sync counter; the processes can't do this themselves cleanly without a race condition

                    # for each process prepare the arguments to the target and send the request through its `send' pipe
                    for i in range(numprocs):
                        start,end=partitionSequence(valrange,i,numprocs)
                        pargs=self.prepareArgs(i,numprocs,args,partArgs)

                        self.procs[i].send.send((target,pargs,kwargs,start,end,valrange,numprocs)) # send the job

                    try:
                        # wait for each process being used to finish, updating the task object's progress if it's present
                        while any(not p.rrecv.poll(0.01) for p in self.procs[:numprocs]): # a process is done when it has sent its result
                            if task:
                                task.setProgress(sum(self.progress))

                        if task: # do a final update
                            task.setProgress(sum(self.progress))

                        result.setObject(dict((p.index,p.rrecv.recv()) for p in self.procs[:numprocs])) # map results to process index
                    except Exception as e:
                        # some error occurred, send e as the result for each process even though it wasn't actually thrown by the processes
                        result.setObject(dict((p.index,e) for p in self.procs[:numprocs]))

    def stop(self):
        '''Stops the processes and object server, no execution after this is possible.'''
        self.objsrv.stop()
        self.stopEvent.set()
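
# --- hedged illustrative sketch (not part of the original example) ---
# callProcessFunc() above relies on a range-partitioning helper to split
# [0, valrange) into one near-equal slice per worker; the same slicing is applied
# to partitionArgs iterables in prepareArgs(). This is an assumed reimplementation
# for illustration; the real partitionSequence() may round boundaries differently.
def partition_sequence(length, index, count):
    """Return the (start, end) slice of range(length) assigned to worker `index` of `count`."""
    start = (length * index) // count
    end = (length * (index + 1)) // count
    return start, end


if __name__ == "__main__":
    # 50 values over 4 workers, mirroring the first docstring example above
    print([partition_sequence(50, i, 4) for i in range(4)])
    # -> [(0, 12), (12, 25), (25, 37), (37, 50)]
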
Exemplo n.º 54
0
class PlantDraw(object):
    def __init__(self, plant, refresh_period=(1.0/240),
                 name='PlantDraw', *args, **kwargs):
        super(PlantDraw, self).__init__()
        self.name = name
        self.plant = plant
        self.drawing_thread = None
        self.polling_thread = None

        self.dt = refresh_period
        self.exec_time = time()
        self.scale = 150  # pixels per meter

        self.center_x = 0
        self.center_y = 0
        self.running = Event()

        self.polling_pipe, self.drawing_pipe = Pipe()

    def init_ui(self):
        plt.close(self.name)
        self.fig = plt.figure(self.name)
        self.ax = plt.gca()
        self.ax.set_xlim([-1.5, 1.5])
        self.ax.set_ylim([-1.5, 1.5])
        self.ax.set_aspect('equal', 'datalim')
        self.ax.grid(True)
        self.fig.canvas.draw()
        self.bg = self.fig.canvas.copy_from_bbox(self.ax.bbox)
        self.cursor = Cursor(self.ax, useblit=True, color='red', linewidth=2)
        self.init_artists()
        # plt.ion()
        plt.show(block=False)

    def drawing_loop(self, drawing_pipe):
        # start the matplotlib plotting
        self.init_ui()

        while self.running.is_set():
            exec_time = time()
            # get any data from the polling loop
            updts = None
            while drawing_pipe.poll():
                data_from_plant = drawing_pipe.recv()
                if data_from_plant is None:
                    self.running.clear()
                    break

                # get the visualization updates from the latest state
                state, t = data_from_plant
                updts = self.update(state, t)
                self.update_canvas(updts)

            # sleep to guarantee the desired frame rate
            exec_time = time() - exec_time
            plt.waitforbuttonpress(max(self.dt-exec_time, 1e-9))
        self.close()

    def close(self):
        # close the matplotlib windows, clean up
        # plt.ioff()
        plt.close(self.fig)

    def update(self, *args, **kwargs):
        plt.figure(self.name)
        updts = self._update(*args, **kwargs)
        self.update_canvas(updts)

    def _update(self, *args, **kwargs):
        msg = "You need to implement the self._update() method in your\
 PlantDraw class."
        raise NotImplementedError(msg)

    def init_artists(self, *args, **kwargs):
        msg = "You need to implement the self.init_artists() method in your\
 PlantDraw class."
        raise NotImplementedError(msg)

    def update_canvas(self, updts):
        if updts is not None:
            # update the drawing from the plant state
            self.fig.canvas.restore_region(self.bg)
            for artist in updts:
                self.ax.draw_artist(artist)
            self.fig.canvas.update()
            # sleep to guarantee the desired frame rate
            exec_time = time() - self.exec_time
            plt.waitforbuttonpress(max(self.dt-exec_time, 1e-9))
        self.exec_time = time()

    def polling_loop(self, polling_pipe):
        current_t = -1
        while self.running.is_set():
            exec_time = time()
            state, t = self.plant.get_state(noisy=False)
            if t != current_t:
                polling_pipe.send((state, t))

            # sleep to guarantee the desired frame rate
            exec_time = time() - exec_time
            sleep(max(self.dt-exec_time, 0))

    def start(self):
        print_with_stamp('Starting drawing loop', self.name)
        self.drawing_thread = Process(target=self.drawing_loop,
                                      args=(self.drawing_pipe, ))
        self.drawing_thread.daemon = True
        self.polling_thread = Thread(target=self.polling_loop,
                                     args=(self.polling_pipe, ))
        self.polling_thread.daemon = True
        # self.drawing_thread = Process(target=self.run)
        self.running.set()
        self.polling_thread.start()
        self.drawing_thread.start()

    def stop(self):
        self.running.clear()

        if self.drawing_thread is not None and self.drawing_thread.is_alive():
            # wait until thread stops
            self.drawing_thread.join(10)

        if self.polling_thread is not None and self.polling_thread.is_alive():
            # wait until thread stops
            self.polling_thread.join(10)

        print_with_stamp('Stopped drawing loop', self.name)
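
# --- hedged illustrative sketch (not part of the original example) ---
# The fast-redraw trick PlantDraw uses (copy_from_bbox / restore_region /
# draw_artist) is standard matplotlib blitting. This minimal standalone version
# assumes an interactive backend is available; the sine-wave data and figure
# setup are illustrative only.
import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
x = np.linspace(0, 2 * np.pi, 200)
(line,) = ax.plot(x, np.sin(x), animated=True)  # dynamic artist, left out of the cached background
plt.show(block=False)
plt.pause(0.1)                                   # let the window render once
background = fig.canvas.copy_from_bbox(ax.bbox)  # cache the static background

for frame in range(100):
    line.set_ydata(np.sin(x + frame / 10.0))
    fig.canvas.restore_region(background)        # restore the cached background
    ax.draw_artist(line)                         # redraw only the moving artist
    fig.canvas.blit(ax.bbox)                     # push the updated region to the screen
    fig.canvas.flush_events()
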
Exemplo n.º 55
0
class AppProcess(Process):
    """
    Process with some additional functionality and fixes
      * Support for a multiprocessing logger
      * Removes signals to prevent join problems
      * Propagates exceptions to owner process
      * Safe terminate with timeout, followed by force terminate
    """

    # Timeout before process is force terminated
    __DEFAULT_TERMINATE_TIMEOUT_MS = 1000

    def __init__(self, name: str):
        self.__name = name
        super().__init__(name=self.__name)

        self.mp_logger = None
        self.logger = logging.getLogger(self.__name)
        self.__exception_queue = Queue()
        self._terminate = Event()

    def set_multiprocessing_logger(self, mp_logger: MultiprocessingLogger):
        self.mp_logger = mp_logger

    @overrides(Process)
    def run(self):
        # Replace the signal handlers that may have been set by main process to
        # default handlers. Having non-default handlers in subprocesses causes
        # a deadlock when attempting to join the process
        # Info: https://stackoverflow.com/a/631605

        # NOTE: There is a minuscule chance of deadlock if a signal is received
        #       between start of the method and these resets.
        #       The ideal solution is to remove the signal before the process is
        #       started. Unfortunately that's difficult to do here because the
        #       subprocess is started from a job thread, and python doesn't
        #       allow setting signals from outside the main thread.
        #       So we accept this risk for the quick and easy solution here
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        signal.signal(signal.SIGINT, signal.SIG_DFL)

        # Set the thread name for convenience
        threading.current_thread().name = self.__name

        # Configure the logger for this process
        if self.mp_logger:
            self.logger = self.mp_logger.get_process_safe_logger().getChild(self.__name)

        self.logger.debug("Started process")

        self.run_init()

        try:
            while not self._terminate.is_set():
                self.run_loop()
            self.logger.debug("Process received terminate flag")
        except ServiceExit:
            self.logger.debug("Process received a ServiceExit")
        except Exception as e:
            self.logger.debug("Process caught an exception")
            self.__exception_queue.put(ExceptionWrapper(e))
            raise
        finally:
            self.run_cleanup()

        self.logger.debug("Exiting process")

    @overrides(Process)
    def terminate(self):
        # Send a terminate signal, and force terminate after a timeout
        self._terminate.set()

        def elapsed_ms(start):
            delta_in_s = (datetime.now() - start).total_seconds()
            delta_in_ms = int(delta_in_s * 1000)
            return delta_in_ms

        timestamp_start = datetime.now()
        while self.is_alive() and \
                elapsed_ms(timestamp_start) < AppProcess.__DEFAULT_TERMINATE_TIMEOUT_MS:
            pass

        super().terminate()

    def propagate_exception(self):
        """
        Raises any exception that was caught by the process
        :return:
        """
        try:
            exc = self.__exception_queue.get(block=False)
            raise exc.re_raise()
        except queue.Empty:
            pass

    @abstractmethod
    def run_init(self):
        """
        Called once before the run loop
        :return:
        """
        pass

    @abstractmethod
    def run_cleanup(self):
        """
        Called once after the run loop exits, to clean up
        :return:
        """
        pass

    @abstractmethod
    def run_loop(self):
        """
        Process behaviour should be implemented here.
        This function is repeatedly called until process exits.
        The check for graceful shutdown is performed between the loop iterations,
        so try to limit the run time for this method.
        :return:
        """
        pass
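
# --- hedged illustrative sketch (not part of the original example) ---
# A possible way to use the AppProcess base class above: subclasses fill in
# run_init(), run_loop(), and run_cleanup(), and the owning process calls
# terminate() and then propagate_exception(). Assumes AppProcess and its
# dependencies are importable; HeartbeatProcess and its sleep-based loop are
# illustrative names only.
import time


class HeartbeatProcess(AppProcess):
    def __init__(self):
        super().__init__(name="heartbeat")

    def run_init(self):
        self.logger.debug("heartbeat starting")

    def run_loop(self):
        # keep each iteration short so the terminate flag is checked often
        self.logger.debug("beat")
        time.sleep(0.5)

    def run_cleanup(self):
        self.logger.debug("heartbeat stopping")


if __name__ == "__main__":
    proc = HeartbeatProcess()
    proc.start()
    time.sleep(2)
    proc.terminate()            # sets the terminate event, force-kills after the timeout
    proc.join()
    proc.propagate_exception()  # re-raises anything the child process caught
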
Exemplo n.º 56
0
class DAMSComm(Process):
    def __init__(self, ip_address, port, data_queue, message_length,
                 socket_timeout, socket_log_conf):
        Process.__init__(self)

        self._ip_address = ip_address
        self._port = port
        self._data_queue = data_queue
        self._message_length = message_length
        self._socket_timeout = socket_timeout
        self._socket_log_config = socket_log_conf
        self._shutdown_event = Event()

    def close(self):
        self._shutdown_event.set()
        return

    def connect(self):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(self._socket_timeout)
        sock.connect((self._ip_address, self._port))
        return sock

    def run(self):
        reconnect_attempts = -1
        reconnect_cnt = 0
        process_data = True
        logger = None
        try:
            logging.config.fileConfig(self._socket_log_config)
            logger = logging.getLogger("DAMSNTSocket")
            logger.info('Socket logging file opened.')

            logger.info("Connecting to ip: %s port: %d" %
                        (self._ip_address, self._port))
            try:
                sock = self.connect()
            except Exception as e:
                logger.error("Failed to connect to ip: %s port: %d" %
                             (self._ip_address, self._port))
                logger.exception(e)
                sock = None  # ensure sock is defined so the loop below retries the connection
            while process_data:
                if not self._shutdown_event.is_set():
                    if sock is not None:
                        try:
                            data = sock.recv(self._message_length)
                            if len(data) > 0:
                                self._data_queue.put(data)
                            else:
                                sock.close()
                                logger.error(
                                    "Disconnected, attempted reconnect.")
                                sock = None
                            reconnect_cnt = 0

                        except socket.timeout as e:
                            logger.error(
                                "Socket timed out. Closing for reconnect.")
                            sock.close()
                            sock = None
                            reconnect_cnt = 0

                        except Exception as e:
                            logger.exception(e)
                            sock.close()
                            sock = None
                            reconnect_cnt = 0
                    else:
                        if reconnect_attempts == -1 or reconnect_cnt <= reconnect_attempts:
                            logger.error("Reconnect: %d to ip: %s port: %d"\
                                         % (reconnect_cnt, self._ip_address, self._port))
                            try:
                                sock = self.connect()
                            except Exception as e:
                                logger.error(
                                    "Failed to connect to ip: %s port: %d" %
                                    (self._ip_address, self._port))
                                logger.exception(e)
                                time.sleep(5)

                            reconnect_cnt += 1
                        else:
                            logger.error(
                                "Exceeded reconnect attempts, exiting.")
                            process_data = False
                else:
                    logger.info("Shutdown event signalled.")
                    process_data = False
                    if sock is not None:
                        sock.close()
        except Exception as e:
            if logger is not None:
                logger.debug(e)
            else:
                traceback.print_exc()
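
# --- hedged illustrative sketch (not part of the original example) ---
# One way the DAMSComm process above might be driven: the owner supplies a
# multiprocessing Queue, drains received messages, and signals shutdown with
# close(). The host, port, message length, and logging config path below are
# placeholder assumptions, not values from the original code.
from multiprocessing import Queue
import queue

if __name__ == "__main__":
    data_queue = Queue()
    comm = DAMSComm(ip_address="127.0.0.1", port=10000, data_queue=data_queue,
                    message_length=4096, socket_timeout=30,
                    socket_log_conf="socket_logging.conf")
    comm.start()
    try:
        while True:
            try:
                message = data_queue.get(timeout=1)
                print("received %d bytes" % len(message))
            except queue.Empty:
                pass
    except KeyboardInterrupt:
        comm.close()  # sets the shutdown event inside the child process
        comm.join()
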
Exemplo n.º 57
0
class Forwarder(Process):
    """
                                  +-------------------------+
                                  |         Redis           |
                                  +-----------||------------+
                                     RedisPubSub + hashset
                                              ||
                                  +-----------vv------------+
                                  |                         |
    forwarder-service ---reg_ep---|->      Forwarder        |
                                  |                         |
                                  +-------------------------+
                                  / / /       |     ^     |
                                 / / /        |     |     |
                                / / /         |     |     |
                               / / /       Tasks Results Commands
                              / / /           |     |     |
                             V V V            V     |     V
                       +-------------+     +--------------------+
                       |  Endpoint 1 | ... |     Endpoint N     |
                       +-------------+     +--------------------+


    Endpoint States
    ---------------
    - Registered: Endpoint has been registered with the service but has not yet
      connected
    - Connected: Endpoint has connected, meaning ZMQ messages can be sent and received
    - Disconnected: ZMQ message sending is failing for this endpoint, making it
      disconnected

    * All endpoints and endpoint states are forgotten when a forwarder restarts
    * Endpoints always start in the Registered state. Before that, the forwarder does
      not know they exist * Endpoints can send a registration message at any time while
      in Connected/Disconnected state and it will not impact the current state
    * Results can arrive on the forwarder from an endpoint when it is in any state. See
      the NOTE below at handle_results

    Endpoint State Transitions
    --------------------------
    Registered -> Connected =>
        Everything is working as expected for first time connection on forwarder
        (Endpoint is registered and ZMQ is working)

    Registered -> nothing =>
        Connectivity issue over the 5500* ports using ZMQ

    Connected -> Disconnected =>
        ZMQ network issues or endpoint is down

    Disconnected -> Connected =>
        Endpoint connection request received. This is either sent when a user manually
        restarts a Disconnected endpoint, or when an endpoint recognizes it has lost
        connection and automatically reconnects
    """
    def __init__(
        self,
        command_queue,
        response_queue,
        address: str,
        redis_address: str,
        rabbitmq_conn_params,
        endpoint_ports=(55001, 55002, 55003),
        redis_port: int = 6379,
        logging_level=logging.INFO,
        heartbeat_period=30,
        result_ttl: int = RESULT_TTL,
        keys_dir=None,
    ):
        """
        Parameters
        ----------
        command_queue: Queue
             Queue used by the service to send commands such as 'REGISTER_ENDPOINT'
             Forwarder expects dicts of the form
             {'command':<TERMINATE/REGISTER_ENDPOINT'> ...}

        response_queue: Queue
             Queue over which responses to commands are returned

        address : str
             Public address at which the forwarder will be accessible from the endpoints

        redis_address : str
             full address to connect to redis. Required

        endpoint_ports : (int, int, int)
             A triplet of ports: (tasks_port, results_port, commands_port)
             Default: (55001, 55002, 55003)

        redis_port : int
             redis port. Default: 6379

        logging_level: int
             Logging level. Default: logging.INFO

        heartbeat_period: int
             heartbeat interval in seconds. Default: 30s

        result_ttl: int
             Task TTL in REDIS after result is available. Default=1hour

        keys_dir: str
             Directory in which curve keys will be stored, Default: '.curve'
        """
        if keys_dir is None:
            keys_dir = os.path.abspath(".curve")

        super().__init__()
        self.command_queue = command_queue
        self.response_queue = response_queue
        self.address = address
        self.redis_url = f"{redis_address}:{redis_port}"
        self.rabbitmq_conn_params = rabbitmq_conn_params
        self.tasks_port, self.results_port, self.commands_port = endpoint_ports
        self.connected_endpoints: t.Dict[str, t.Dict[str, t.Any]] = {}
        self.kill_event = Event()
        self.heartbeat_period = heartbeat_period
        self._last_heartbeat = time.time()
        self.keys_dir = keys_dir
        self.result_ttl = result_ttl
        # TODO: drop support for imperatively configuring the redis host information
        # for the forwarder. Instead, FUNCX_COMMON_REDIS_URL should be used
        redis_client = default_redis_connection_factory(
            f"redis://{redis_address}:{redis_port}")
        self.redis_pubsub = FuncxRedisPubSub(redis_client=redis_client)
        self.endpoint_db = EndpointDB(redis_client=redis_client)
        self.task_storage = get_default_task_storage()
        logger.info(f"Initializing forwarder v{funcx_forwarder.__version__}")
        logger.info(f"Forwarder running on public address: {self.address}")
        logger.info(f"REDIS url: {self.redis_url}")
        logger.info(f"Log level set to {loglevels[logging_level]}")

        if not os.path.exists(self.keys_dir) or not os.listdir(self.keys_dir):
            logger.info(f"Keys dir empty: {self.keys_dir}, creating keys")
            os.makedirs(self.keys_dir, exist_ok=True)
            forwarder_keyfile, _ = zmq.auth.create_certificates(
                self.keys_dir, "server")
        else:
            logger.info(
                f"Keys in {self.keys_dir}: {os.listdir(self.keys_dir)}")
            forwarder_keyfile = os.path.join(self.keys_dir, "server.key")

        try:
            with open(forwarder_keyfile) as f:
                self.forwarder_pubkey = f.read()
        except Exception:
            logger.exception(
                f"[CRITICAL] Failed to read server keyfile from {forwarder_keyfile}"
            )
            raise

    @property
    def redis_client(self) -> redis.Redis:
        # proxy attribute pointing to the underlying pubsub's client
        # this is used for now to get a client quickly and easily
        #
        # TODO: consider changes in funcx-common so that there's a more obvious way
        # to share a connection between the Forwarder and its attached pubsub
        return self.redis_pubsub.redis_client

    def command_processor(self, kill_event):
        """command_processor listens on the self.command_queue
        for commands and responds with results on the self.response_queue

        COMMAND messages are dicts of the form:
        {'command' : ['TERMINATE', 'REGISTER_ENDPOINT' ... 'ENDPOINT_LOAD_CONFIG'],
         'id': <ID:int>,
         'options' : ...
        }
        Responses are of the form:
        """
        try:
            while not kill_event.is_set():
                command = self.command_queue.get()
                logger.debug(f"[COMMAND] Received command {command}")
                if command["command"] == "LIVENESS":
                    response = {"response": True, "id": command.get("id")}
                elif command["command"] == "TERMINATE":
                    logger.info("[COMMAND] Received TERMINATE command")
                    response = {"response": True, "id": command.get("id")}
                    kill_event.set()
                elif command["command"] == "REGISTER_ENDPOINT":
                    logger.info("[COMMAND] Received REGISTER_ENDPOINT command")
                    result = self.register_endpoint(
                        command["endpoint_id"],
                        command["endpoint_address"],
                        command["client_public_key"],
                    )

                    response = {
                        "response": result,
                        "id": command.get("id"),
                        "endpoint_id": command["endpoint_id"],
                        "forwarder_pubkey": self.forwarder_pubkey,
                        "public_ip": self.address,
                        "tasks_port": self.tasks_port,
                        "results_port": self.results_port,
                        "commands_port": self.commands_port,
                    }

                else:
                    response = {
                        "response": False,
                        "id": command.get("id"),
                        "reason": "Unknown command",
                    }

                self.response_queue.put(response)
        except Exception:
            logger.exception("Caught exception while processing command")
            sys.exit(-1)

    def register_endpoint(self, endpoint_id, endpoint_address, key):
        """Add new client keys to the zmq authenticator

        Registering an existing endpoint_id is allowed
        """
        logger.info(
            "endpoint_registered",
            extra={
                "log_type": "endpoint_registered",
                "endpoint_id": endpoint_id
            },
        )

        self.update_endpoint_metadata(endpoint_id, endpoint_address)

        self.tasks_q.add_client_key(endpoint_id, key)
        self.results_q.add_client_key(endpoint_id, key)
        self.commands_q.add_client_key(endpoint_id, key)
        return True

    def update_endpoint_metadata(self, endpoint_id, endpoint_address):
        """Geo locate the endpoint and push as metadata into redis"""
        try:
            resp = requests.get(f"http://ipinfo.io/{endpoint_address}/json")
            self.endpoint_db.set_endpoint_metadata(endpoint_id, resp.json())
        except Exception:
            logger.error(f"Failed to geo locate {endpoint_address}")
        else:
            logger.info(f"Endpoint with {endpoint_address} is at {resp}")

    def initialize_endpoint_queues(self):
        """Initialize the three queues over which the forwarder communicates with endpoints
        TaskQueue in mode='server' binds to all interfaces by default
        """
        self.tasks_q = TaskQueue(
            "127.0.0.1",
            port=self.tasks_port,
            RCVTIMEO=1,
            keys_dir=self.keys_dir,
            mode="server",
        )
        self.results_q = TaskQueue("127.0.0.1",
                                   port=self.results_port,
                                   keys_dir=self.keys_dir,
                                   mode="server")
        self.commands_q = TaskQueue("127.0.0.1",
                                    port=self.commands_port,
                                    keys_dir=self.keys_dir,
                                    mode="server")
        return

    def disconnect_endpoint(self, endpoint_id):
        """Unsubscribes from Redis pubsub and "removes" endpoint from the tasks channel.
        This method does nothing if the endpoint is already disconnected.

        Triggered by zmq messages not getting delivered (heartbeats, tasks, result acks)
        TODO: This needs some extensive testing. It is unclear how well detecting
        failures will work on WAN networks with latencies.
        """
        disconnected_endpoint = self.connected_endpoints.pop(endpoint_id, None)
        # if the endpoint is already disconnected, simply return
        if not disconnected_endpoint:
            return

        logger.info(
            "endpoint_disconnected",
            extra={
                "log_type": "endpoint_disconnected",
                "endpoint_id": endpoint_id
            },
        )

        self.redis_pubsub.unsubscribe(endpoint_id)

    def add_endpoint_keys(self, ep_id, ep_key):
        """To remove. this is not used."""
        self.tasks_q.add_client_key(ep_key)
        self.results_q.add_client_key(ep_key)
        self.commands_q.add_client_key(ep_key)

    def add_subscriber(self, ep_id):
        self.redis_pubsub.subscribe(ep_id)

    def heartbeat(self):
        """ZMQ contexts are not thread-safe.
        heartbeats should happen on the same thread."""
        if self._last_heartbeat + self.heartbeat_period > time.time():
            return
        logger.info("Heartbeat")
        dest_endpoint_list = list(self.connected_endpoints.keys())
        for dest_endpoint in dest_endpoint_list:
            logger.debug(
                f"Sending heartbeat to {dest_endpoint}",
                extra={
                    "log_type": "endpoint_heartbeat_sent",
                    "endpoint_id": dest_endpoint,
                },
            )
            msg = Heartbeat(endpoint_id=dest_endpoint)
            try:
                self.tasks_q.put(dest_endpoint.encode("utf-8"), msg.pack())
                self.connected_endpoints[dest_endpoint][
                    "missed_heartbeats"] = 0

            except (zmq.error.ZMQError, zmq.Again):
                logger.exception(
                    f"Endpoint:{dest_endpoint} is unreachable over heartbeats")
                self.disconnect_endpoint(dest_endpoint)
        self._last_heartbeat = time.time()

    def handle_endpoint_connection(self):
        """Receive endpoint connection messages. Only connection messages
        are sent from the interchange -> forwarder on the task_q
        """
        try:
            b_ep_id, b_reg_message = self.tasks_q.get(
                timeout=0)  # timeout in ms; 0 makes this a non-blocking poll
            # At this point ep_id is authenticated by means having the client keys.
            ep_id = b_ep_id.decode("utf-8")
            reg_message = pickle.loads(b_reg_message)

            if ep_id in self.connected_endpoints:
                # this is normal, it just means that the endpoint never reached
                # Disconnected state before connecting again
                logger.info(
                    f"[MAIN] Endpoint:{ep_id} attempted connect when it already is in "
                    "connected list",
                    extra={
                        "log_type": "endpoint_reconnected",
                        "endpoint_id": ep_id
                    },
                )
            else:
                logger.info(
                    "endpoint_connected",
                    extra={
                        "log_type": "endpoint_connected",
                        "endpoint_id": ep_id,
                        "registration_message": reg_message,
                    },
                )
                # Now subscribe to messages for ep_id
                # if this endpoint is already in self.connected_endpoints, it is already
                # subscribed
                self.add_subscriber(ep_id)

            self.connected_endpoints[ep_id] = {
                "registration_message": reg_message,
                "missed_heartbeats": 0,
            }
        except zmq.Again:
            pass
        except Exception:
            logger.exception("Caught exception while waiting for registration")

    def log_task_transition(self, task, transition_name):
        extra_logging = {
            "user_id": task.user_id,
            "task_id": task.task_id,
            "task_group_id": task.task_group_id,
            "function_id": task.function_id,
            "endpoint_id": task.endpoint,
            "container_id": task.container,
            "log_type": "task_transition",
        }
        logger.info(transition_name, extra=extra_logging)

    def forward_task_to_endpoint(self):
        """Migrates one task from redis to the appropriate endpoint

        Returns:
            int: Count of tasks migrated (0,1)
        """
        # Now wait for any messages on REDIS that needs forwarding.
        task = None
        try:
            dest_endpoint, task_id = self.redis_pubsub.get(timeout=0)
            task = RedisTask(self.redis_client, task_id)
            logger.debug(
                f"Got message from REDIS: {dest_endpoint}:{task}",
                extra={
                    "log_type": "forwarder_redis_task_get",
                    "endpoint_id": dest_endpoint,
                },
            )
        except queue.Empty:
            return 0
        except Exception:
            logger.exception("Caught exception waiting for message from REDIS")
            return 0

        task_payload = self.task_storage.get_payload(task)

        if dest_endpoint not in self.connected_endpoints:
            # At this point we should be unsubscribed and receiving only messages
            # from the TCP buffers.
            logger.warning(
                "Putting back REDIS message for unconnected endpoint: %s:%s",
                dest_endpoint,
                task,
                extra={
                    "log_type": "forwarder_redis_task_put",
                    "endpoint_id": dest_endpoint,
                },
            )
            self.redis_pubsub.put(dest_endpoint, task)
            self.redis_pubsub.unsubscribe(dest_endpoint)
        else:
            try:
                task_id = task.task_id
                logger.info(
                    f"Sending task:{task_id} to endpoint:{dest_endpoint}")
                zmq_task = Task(task_id, task.container, task_payload)
            except TypeError:
                # A TypeError is raised when the Task object can't be recomposed from
                # redis due to missing values during high-workload events.
                logger.exception(f"Unable to access task {task_id} from redis")
                logger.debug(
                    f"Task:{task_id} is now LOST",
                    extra={
                        "log_type": "task_lost",
                        "endpoint_id": dest_endpoint,
                        "task_id": task_id,
                    },
                )
                return 0

            try:
                self.tasks_q.put(dest_endpoint.encode("utf-8"),
                                 zmq_task.pack())
            except (zmq.error.ZMQError, zmq.Again):
                logger.exception(f"Endpoint:{dest_endpoint} is unreachable")
                # put task back in redis since it was not sent to endpoint
                self.redis_pubsub.put(dest_endpoint, task)
                self.disconnect_endpoint(dest_endpoint)
            except Exception:
                logger.exception(
                    f"Caught error while sending {task_id} to {dest_endpoint}")
                # put task back in redis since it was not sent to endpoint
                self.redis_pubsub.put(dest_endpoint, task)
            else:
                self.log_task_transition(task, "dispatched_to_endpoint")
        return 1

    def handle_results(self):
        """Receive incoming results on results_q and update Redis with results

        NOTE: Results can arrive on this queue when the endpoint is in any of the 3
        states (Registered, Connected, Disconnected), and we will accept the results.
        This is because we do not tie the connection status of this queue to the state
        of the endpoint, as doing so could mean rejecting perfectly good results on a
        working ZMQ connection.

        Registered =>
            Getting results in this state means this zmq pipe has opened and results are
            sent over before the connection message has been sent by the endpoint.

        Connected =>
            Getting results in this state is normal, as a connection message has been
            sent and zmq pipes are working.

        Disconnected =>
            Getting results in this state means the endpoint registered and was
            connected, but a zmq send failed over a different pipe, sending the endpoint
            to the Disconnected state. The results pipe could still be working, or it
            could have started working again before the connection message is sent again
        """
        try:
            # timeout in ms, when 0 it's nonblocking
            b_ep_id, b_message = self.results_q.get(block=False, timeout=0)
            endpoint_id = b_ep_id.decode("utf-8")

            if b_message == b"HEARTBEAT":
                logger.debug(
                    f"Received heartbeat from {endpoint_id} over results channel",
                    extra={
                        "log_type": "endpoint_heartbeat_received",
                        "endpoint_id": endpoint_id,
                    },
                )
                return

            try:
                message = pickle.loads(b_message)
            except Exception:
                logger.exception(
                    f"Failed to unpickle message from results_q, message:{b_message}"
                )
                # message would be undefined below, so stop processing this item
                return

            if isinstance(message, EPStatusReport):
                logger.debug(
                    "endpoint_status_message",
                    extra={
                        "log_type": "endpoint_status_message",
                        "endpoint_id": endpoint_id,
                        "endpoint_status_message": message.__dict__,
                    },
                )
                # Update endpoint status
                try:
                    self.endpoint_db.put(endpoint_id, message.ep_status)
                except Exception:
                    logger.error(
                        "Caught error while trying to push endpoint status data "
                        "into redis")

                # Update task status from endpoint
                task_status_delta = message.task_statuses
                for task_id, status_code in task_status_delta.items():
                    status = status_code_convert(status_code)

                    logger.debug(
                        f"Updating Task({task_id}) to status={status}")
                    task = RedisTask(self.redis_client, task_id)
                    task.status = status
                return

            if "registration" in message:
                logger.debug(
                    f"Registration message from {message['registration']}")
                return

            # only messages with a result or an exception are processed past this point
            result_or_exception = "result" in message or "exception" in message
            if not result_or_exception:
                logger.warning(
                    "A task result message was received without a result or an "
                    "exception",
                    extra={
                        "endpoint_id": endpoint_id,
                        "endpoint_status_message": message.__dict__,
                    },
                )
                return

            task_id = message["task_id"]

            if not RedisTask.exists(self.redis_client, task_id):
                logger.warning(
                    f"Got result for task that does not exist in redis: {task_id}"
                )
                # if the task does not exist in redis, it may mean it was retrieved by
                # the user and deleted from redis before it could be acked so the
                # endpoint sent another result message. This means we should ack this
                # task_id to prevent the endpoint from continuing to send it
                self.handle_results_ack(endpoint_id, task_id)
                return

            task = RedisTask(self.redis_client, task_id)
            logger.debug(f"Task info : {task}")

            # handle if we get duplicate task ids (if one of the critical sections
            # below did not succeed, the task could never reach an internal state
            # of COMPLETE, meaning we will retry that section)
            try:
                if task.internal_status == InternalTaskState.COMPLETE:
                    logger.debug(
                        f"Duplicate result received for task: {task_id}")
                    # resend results ack in case the previous ack was not received for
                    # this result
                    self.handle_results_ack(endpoint_id, task_id)
                    return
            except ValueError:
                # A ValueError is raised if the task was wiped from redis by a
                # client-fetch
                # We should ack the endpoint so that it can wipe its local cache
                self.handle_results_ack(endpoint_id, task_id)
                logger.warning(f"ACK requested for task:{task_id} which was "
                               "already fetched by client.")
                return

            # this critical section is where the final task redis data is set,
            # and the task result will not be acked if this fails
            if "result" in message:
                task.status = TaskState.SUCCESS
                self.task_storage.store_result(task, message["result"])
                task.completion_time = time.time()
            elif "exception" in message:
                task.status = TaskState.FAILED
                task.exception = message["exception"]
                task.completion_time = time.time()
            task.set_expire(self.result_ttl)

            # this critical section is where the task ID is sent over RabbitMQ,
            # and the task result will not be acked if this fails
            task_group_id = task.task_group_id
            if task_group_id:
                connection = pika.BlockingConnection(self.rabbitmq_conn_params)
                channel = connection.channel()
                channel.exchange_declare(exchange="tasks",
                                         exchange_type="direct")
                channel.queue_declare(queue=task_group_id)
                channel.queue_bind(task_group_id, "tasks")

                # important: the FuncX client must be capable of receiving the same
                # task_id multiple times, in case this bit succeeds but code below this
                # fails and this must be retried
                channel.basic_publish(exchange="tasks",
                                      routing_key=task_group_id,
                                      body=task_id)
                logger.debug(
                    f"Publishing to RabbitMQ routing key {task_group_id} : {task_id}"
                )
                connection.close()

                self.log_task_transition(task, "result_enqueued")

            # internally, the task is only considered complete when both critical
            # sections above have succeeded (redis data is sent and the task_id is
            # sent over RabbitMQ)
            task.internal_status = InternalTaskState.COMPLETE
            self.handle_results_ack(endpoint_id, task_id)

        except zmq.Again:
            pass
        except Exception:
            logger.exception("Caught exception from results queue")

    def handle_results_ack(self, endpoint_id, task_id):
        if endpoint_id not in self.connected_endpoints:
            logger.warning(
                "Attempting to send results Ack to disconnected endpoint: %s",
                endpoint_id,
                extra={
                    "log_type": "disconnected_ack_attempt",
                    "endpoint_id": endpoint_id,
                    "task_id": task_id,
                },
            )
            # a disconnected endpoint has no registration message below, so skip the ack
            return

        # TODO: remove once endpoint version 0.3.* is deprecated
        reg_message = self.connected_endpoints[endpoint_id][
            "registration_message"]
        # if the key funcx_endpoint_version is in the registration message, it means
        # the endpoint version is >=0.3.3, because 0.3.3 is the first endpoint version
        # where we started sending the funcx_endpoint_version key to the forwarder
        if "funcx_endpoint_version" not in reg_message:
            logger.debug(
                f"Ack not sent to endpoint {endpoint_id} for backwards compatability "
                "because it has version <0.3.3")
            return

        msg = ResultsAck(task_id=task_id)
        logger.debug(
            f"Sending Result Ack to endpoint {endpoint_id} for task {task_id}: {msg}",
            extra={
                "log_type": "results_ack",
                "endpoint_id": endpoint_id,
                "task_id": task_id,
            },
        )
        try:
            # send an ack
            self.tasks_q.put(endpoint_id.encode("utf-8"), msg.pack())
        except (zmq.error.ZMQError, zmq.Again):
            logger.exception(f"Endpoint:{endpoint_id} results ack send failed")
            self.disconnect_endpoint(endpoint_id)

    def run(self):
        """Process entry point"""
        try:
            logger.info("[MAIN] Loop starting")
            logger.info("[MAIN] Connecting to redis")
            logger.info(
                f"[MAIN] Forwarder listening for tasks on: {self.tasks_port}")
            logger.info(
                f"[MAIN] Forwarder listening for results on: {self.results_port}"
            )
            logger.info(
                f"[MAIN] Forwarder issuing commands on: {self.commands_port}")

            self.initialize_endpoint_queues()
            self._command_processor_thread = threading.Thread(
                target=self.command_processor,
                args=(self.kill_event, ),
                name="forwarder-command-processor",
            )
            self._command_processor_thread.start()

            while True:
                if self.kill_event.is_set():
                    logger.critical(
                        "Kill event set. Starting termination sequence")
                    # 1. [TODO] Unsubscribe from all
                    # 2. [TODO] Flush all tasks received back to their queues for
                    #           reprocessing.
                    # 3. [TODO] Figure out how we can trigger a scaling event to replace
                    #           lost forwarder?
                    break  # exit the main loop instead of re-logging the kill event forever

                # Send heartbeats to every connected manager
                self.heartbeat()

                self.handle_endpoint_connection()
                # [TODO] This step could be in a timed loop. Ideally after we have a
                # perf study
                self.forward_task_to_endpoint()
                self.handle_results()
        except Exception:
            logger.exception("Caught exception while running forwarder")
            sys.exit(-1)
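
# --- hedged illustrative sketch (not part of the original example) ---
# The endpoint lifecycle described in the Forwarder docstring (Registered ->
# Connected -> Disconnected -> Connected) written out as an explicit state
# machine. The enum and transition table are illustrative only; the forwarder
# itself tracks connection state implicitly through its connected_endpoints dict.
from enum import Enum, auto


class EndpointState(Enum):
    REGISTERED = auto()    # registered with the service, no ZMQ connection yet
    CONNECTED = auto()     # ZMQ messages flow in both directions
    DISCONNECTED = auto()  # a ZMQ send failed; waiting for a new connection message


ALLOWED_TRANSITIONS = {
    EndpointState.REGISTERED: {EndpointState.CONNECTED},
    EndpointState.CONNECTED: {EndpointState.DISCONNECTED},
    EndpointState.DISCONNECTED: {EndpointState.CONNECTED},
}


def transition(current, new):
    if new not in ALLOWED_TRANSITIONS[current]:
        raise ValueError("illegal transition %s -> %s" % (current.name, new.name))
    return new


if __name__ == "__main__":
    state = EndpointState.REGISTERED
    state = transition(state, EndpointState.CONNECTED)     # endpoint connects
    state = transition(state, EndpointState.DISCONNECTED)  # a heartbeat send fails
    state = transition(state, EndpointState.CONNECTED)     # endpoint reconnects
    print(state.name)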