Example No. 1
class SubProcessWrapper:
	cname = __name__ + '.SubProcessWrapper'
	def __init__(self, target, name=None):
		self.target = target
		self.running = False
		self.name = name if name else target.task_name()
		self.kill_event = Event()
		self.logger = logging.getLogger(self.cname)

	def run(self):
		self.logger.info("starting SubProcessTask: {}".format(self.target.task_name()))
		th = Thread(target=self.target, name=self.target.task_name())
		th.start()		
		signal.signal(signal.SIGINT, signal.SIG_IGN)
		self.kill_event.wait()
		self.logger.info("stopping SubProcessTask: {}".format(self.target.task_name()))
		self.target.stop()
		th.join()
		self.logger.info("Stopped SubProcessTask: {}".format(self.target.task_name()))

	def __call__(self):
		self.run()

	def get_kill_event(self):
		return self.kill_event
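
A minimal usage sketch for the wrapper above (not part of the original source), assuming a hypothetical DummyTask that provides the task_name(), __call__() and stop() interface the wrapper expects; the parent process sets the wrapper's kill event to trigger a clean shutdown:

# Hypothetical usage sketch for SubProcessWrapper, assuming it lives in the
# same module; DummyTask is an illustrative stand-in for a real task object.
import logging
import time
from multiprocessing import Process

class DummyTask:
	def __init__(self):
		self._stopped = False

	def task_name(self):
		return "dummy"

	def __call__(self):
		while not self._stopped:  # loop until stop() is called
			time.sleep(0.1)

	def stop(self):
		self._stopped = True

if __name__ == "__main__":
	logging.basicConfig(level=logging.INFO)
	wrapper = SubProcessWrapper(DummyTask())
	proc = Process(target=wrapper, name=wrapper.name)
	proc.start()
	time.sleep(1)
	wrapper.get_kill_event().set()  # ask the child to stop its task and exit
	proc.join()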
Example No. 2
    def execute_action(self, action):
        event = Event()
        queue = Queue()
        proc = Process(
            target=execute_action_proc,
            args=(self.execute, action, event, queue))
        proc.start()

        # Send heartbeat.
        heartbeat_retry = 0
        while not event.is_set():
            event.wait(config.ACTIVITY_HEARTBEAT_INTERVAL)
            try:
                res = self.heartbeat(self.task_token)
                if res['cancelRequested']:
                    proc.terminate()
                    proc.join()
                    return Result('cancelled', -1, '', '', '', -1)
            except Exception as err:
                if heartbeat_retry <= config.ACTIVITY_HEARTBEAT_MAX_RETRY:
                    heartbeat_retry += 1
                    continue
                else:
                    proc.terminate()
                    proc.join()
                    raise

        # Evaluate the result.
        result = queue.get_nowait()
        proc.join()
        return result
Example No. 3
    def test_play_and_record(self):
        """
        Verifies that a Device can play back prerecorded events.
        """
        device = evemu.Device(self.get_device_file())
        devnode = device.devnode
        events_file = self.get_events_file()
        # device.record() calls evemu_record() and is thus missing the
        # description that the input file has
        with open(events_file) as e:
            indata = extract_events(strip_comments(e.readlines()))

        recording_started = Event()
        q = Queue()
        record_process = Process(target=record,
                                 args=(recording_started, devnode, q))
        record_process.start()
        recording_started.wait(100)
        device.play(open(events_file))

        outdata = strip_comments(q.get())
        record_process.join()

        self.assertEquals(len(indata), len(outdata))
        fuzz = re.compile(r"E: \d+\.\d+ (.*)")
        for i in range(len(indata)):
            lhs = fuzz.match(indata[i])
            self.assertTrue(lhs)
            rhs = fuzz.match(outdata[i])
            self.assertTrue(rhs)
            self.assertEquals(lhs.group(1), rhs.group(1))
Example No. 4
class ChildChecker(threading.Thread):
    def __init__(self, killEvent):
        super(ChildChecker, self).__init__()
        self.killEvent = killEvent
        self.event = Event()
        self.process = Process(target=childsPlay, args=(self.event,))

    def run(self):
        self.process.start()

        while not self.killEvent.is_set():
            self.event.wait()
            print "Child checked, and is done playing"
            if raw_input("Do again? y/n:") == "y":
                self.event.clear()
                self.process = Process(target=endlessChildsPlay, args=(self.event,))
                self.process.start()
            else:
                self.cleanChild()
                self.killEvent.set()

    def join(self):
        print "Joining child process"
        # Timeout on 5 seconds
        self.process.join(5)

        if self.process.is_alive():
            print "Child did not join!  Killing.."
            self.process.terminate()
        print "Joining ChildChecker thread"
        super(ChildChecker, self).join()


    def cleanChild(self):
        print "Cleaning up the child..."
Example No. 5
    def test_host_is_down(self):
        from gaw import interface_class, service_class, client_class

        server_start = Event()

        @interface_class(service_name='ServiceInterfaceStyle')
        class Interface(object):
            def ping(self): pass

        def server():
            @service_class
            class Service(Interface):
                def ping(self): return True

            GawServer(ip='0.0.0.0', port=4000).add(Service).run(lambda: server_start.set())

        def client():
            @client_class(ip='localhost', port=4000, retries=0)  # do not retry
            class Client(Interface): pass

            self.assertRaises(Exception, Client().ping)

        p = Process(target=server)
        p.start()

        server_start.wait()
        p.terminate()
        p.join()

        client()
Example No. 6
def pipelineDaemon(pipeline, returnEvent, options=None, programName=None):
    """Launches Pyro server and (if specified by options) pipeline executors"""
    
    #check for valid pipeline 
    if pipeline.runnable.empty():
        print "Pipeline has no runnable stages. Exiting..."
        sys.exit()

    if options.urifile==None:
        options.urifile = os.path.abspath(os.curdir + "/" + "uri")
    
    e = Event()
    process = Process(target=launchServer, args=(pipeline,options,e,))
    process.start()
    e.wait()
    if options.num_exec != 0:
        processes = [Process(target=launchPipelineExecutor, args=(options,programName,)) for i in range(options.num_exec)]
        for p in processes:
            p.start()
        for p in processes:
            p.join()
    
    #Return to calling code if pipeline has no more runnable stages:
    #Event will be cleared once clients are unregistered. 
    while e.is_set():
        time.sleep(5)
    returnEvent.set()
Example No. 7
class ServerProc(object):
    def __init__(self):
        self.proc = None
        self.daemon = None
        self.stop = Event()

    def start(self, init_func, config, paths, port):
        self.proc = Process(target=self.create_daemon, args=(init_func, config, paths, port))
        self.proc.daemon = True
        self.proc.start()

    def create_daemon(self, init_func, config, paths, port):
        try:
            self.daemon = init_func(config, paths, port)
        except socket.error:
            logger.error("Socket error on port %s" % port)
            raise

        if self.daemon:
            self.daemon.start(block=False)
            try:
                self.stop.wait()
            except KeyboardInterrupt:
                pass

    def wait(self):
        self.stop.set()
        self.proc.join()

    def kill(self):
        self.stop.set()
        self.proc.terminate()
        self.proc.join()
Example No. 8
class StoppableProcess(Process):
    exit = None
    sleep = None

    def __init__(self, sleep=1, *args, **kwargs):
        self.exit = Event()
        self.sleep = sleep
        super(StoppableProcess, self).__init__(*args, **kwargs)

    def _setup(self):
        pass

    def _teardown(self):
        pass

    def _ping(self):
        raise NotImplementedError

    def _should_exit(self):
        return self.exit.wait(0)

    def run(self):
        self._setup()
        while True:
            if self._ping() or self.exit.wait(self.sleep * 1.0):
                self._teardown()
                return

    def stop(self):
        self.exit.set()
        self.join(self.sleep)
        if self.is_alive():
            self.terminate()
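
A short usage sketch for this StoppableProcess variant (not from the original source), assuming a hypothetical subclass whose _ping() does one unit of work and returns a falsy value to keep the loop running:

# Hypothetical subclass: run() calls _ping() once per `sleep` interval until
# either _ping() returns a truthy value or stop() sets the exit event.
import time

class HeartbeatProcess(StoppableProcess):
    def _ping(self):
        print("heartbeat at", time.strftime("%H:%M:%S"))
        return False  # keep looping until stop() is called

if __name__ == "__main__":
    worker = HeartbeatProcess(sleep=1)
    worker.start()
    time.sleep(3)
    worker.stop()  # sets the exit event, joins, and terminates if needed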
Example No. 9
    def test_multiple_concurrent_request_on_same_client(self):
        import time
        from multiprocessing.pool import ThreadPool

        send_msg = u'test ฟนำีฟนำีฟนำีฟนำี'
        server_start = Event()

        def on_message(msg):
            self.assertEqual(msg, send_msg)

        def server():
            PostofficeServer(ip='0.0.0.0', port=4000, on_message=on_message, after_start_cb=lambda: server_start.set())

        from multiprocessing import Process
        p = Process(target=server)
        try:
            p.start()

            server_start.wait()

            c = PostofficeClient(ip='localhost', port=4000)

            def client(ith):
                c.send(send_msg)

            pool = ThreadPool(100)
            pool.map(client, [i for i in range(1000)])

            p.terminate()
            p.join()
        except Exception as e:
            # gracefully stop
            p.terminate()
            p.join()
            raise
Example No. 10
    def test_server_client(self):
        import time

        send_msg = u'test ฟนำีฟนำีฟนำีฟนำี'
        server_start = Event()

        def on_message(msg):
            self.assertEqual(msg, send_msg)
            return msg

        def server():
            PostofficeServer(ip='0.0.0.0', port=4000, on_message=on_message, after_start_cb=lambda: server_start.set())

        def client():
            client = PostofficeClient(ip='localhost', port=4000)
            response = client.send(send_msg)
            self.assertEqual(response, send_msg)

        from multiprocessing import Process
        p = Process(target=server)
        try:
            p.start()
            server_start.wait()
            client()
            p.terminate()
            p.join()
        except Exception as e:
            # gracefully stop
            p.terminate()
            p.join()
            raise
Example No. 11
    def test_host_is_down(self):
        from multiprocessing import Event, Process

        server_start = Event()

        def a(path):
            return path

        def b(path, a, b):
            return a + b

        def c(path, a, b):
            return a * b

        def server():
            server = JsonSocketServer(ip='0.0.0.0', port=4000, verbose=True)
            server.register_route(u'a ฟนำี', a)
            server.register_route('b', b)
            server.register_route('c', c)
            server.start(lambda: server_start.set())

        def client():
            client = JsonSocketClient(verbose=True)
            self.assertRaises(Exception, client.request, ip='localhost', port=4000, path=u'a ฟนำี', payload=dict(), retries=0)

        p = Process(target=server)
        p.start()

        server_start.wait()

        p.terminate()
        p.join()

        client()
Example No. 12
    def wait_for(self, key, value):
        d = Manager().dict()
        d[key] = None
        v = Manager().Value('s', ' ')
        e = Event()

        p_state = Process(target=self.state_refresher, args=(self, d, e))
        p_input = Process(target=self.input_waiter, args=(self, v, e))
        p_state.start()
        p_input.start()

        while v.value != 'exit' and dict(d.items())[key] != value:
            e.wait()
            e.clear()

        self.state = d['state']
        self.current = d['current']
        self.enemy = d['enemy']
        self.battlefield = d['battlefield']

        p_state.terminate()
        p_input.terminate()
        curses.endwin()
        p_state.join()
        p_input.join()

        return True if dict(d.items())[key] == value else False
Example No. 13
def throughput(seconds=10, concurrency=1):
    '''
    seconds should be greater than or equal to 10
    10,000,000 page views per day is roughly 115 requests per second
    '''
    stop_flag=Event()
    processes=[]
    t=Timer(seconds,stop,args=[stop_flag])
    q = Queue()
    for i in range(concurrency):
        processes.append(Process(target=run,args=(q,stop_flag)))
    t.start()
    for p in processes:
        p.start()
    #print 'start waiting for workers:',len(processes)
    stop_flag.wait()
    for t in processes:
        t.join()
    total=err=cost=0
    while not q.empty():
        (req_counter,err_counter,time_cost)=q.get()
        total=total+req_counter
        err=err+err_counter
        cost=cost+time_cost
    cost=cost/total if total>0 else 0

    return total,err,cost
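
The run() and stop() helpers used above are not shown; a minimal sketch of what they might look like, assuming each worker repeatedly calls a placeholder make_request() and reports (request count, error count, accumulated latency) through the queue:

# Hypothetical helpers for throughput(); make_request() is a stand-in for
# whatever HTTP call or unit of work is actually being benchmarked.
import time

def make_request():
    time.sleep(0.001)  # placeholder work

def stop(stop_flag):
    # Timer callback: signal the workers and the driver that time is up.
    stop_flag.set()

def run(q, stop_flag):
    req_counter = err_counter = 0
    time_cost = 0.0
    while not stop_flag.is_set():
        started = time.time()
        try:
            make_request()
            req_counter += 1
        except Exception:
            err_counter += 1
        time_cost += time.time() - started
    q.put((req_counter, err_counter, time_cost))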
Example No. 14
class StoppableProcess(Process):
    """ Base class for Processes which require the ability
    to be stopped by a process-safe method call
    """

    def __init__(self):
        self._should_stop = Event()
        self._should_stop.clear()
        super(StoppableProcess, self).__init__()

    def join(self, timeout=0):
        """ Joins the current process and forces it to stop after
        the timeout if necessary

        :param timeout: Timeout duration in seconds
        """
        self._should_stop.wait(timeout)
        if not self.should_stop():
            self.stop()
        super(StoppableProcess, self).join(0)

    def stop(self):
        self._should_stop.set()

    def should_stop(self):
        return self._should_stop.is_set()

    def __repr__(self):
        return "<%s(should_stop=%s)>" % (
            self.__class__.__name__, self.should_stop())
Example No. 15
class SharedFile(object):
    def __init__(self, filename):
        self.filename = filename
        self.fevent = Event()
        # self.state = Value('i', 0)
        self.fevent.set()

    def write(self, mode, data):
        # print("Write {}".format(inspect.stack()[1][3]))
        self.wait_freedom_and_lock()

        f = open(self.filename, mode)
        f.write(data)
        f.close()
        self.unlock()

    def read(self):
        # print("Read {}".format(inspect.stack()[1][3]))
        self.wait_freedom_and_lock()

        f = open(self.filename, 'r')
        data = f.read()
        f.close()
        self.unlock()
        return data

    def wait_freedom_and_lock(self):
        self.fevent.wait()
        self.fevent.clear()
        # return

    def unlock(self):
        self.fevent.set()
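
A brief usage sketch for SharedFile (not part of the original source): the instance is passed to each Process at creation time, so all workers share the same Event.

# Hypothetical illustration: several processes appending to one file,
# serialized by SharedFile's Event-based lock.
from multiprocessing import Process

def worker(shared, idx):
    shared.write('a', "line from worker {}\n".format(idx))

if __name__ == "__main__":
    shared = SharedFile("shared.log")
    procs = [Process(target=worker, args=(shared, i)) for i in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(shared.read())

Note that wait() followed by clear() is not an atomic acquire, so two waiters can occasionally slip through together; a multiprocessing.Lock is the stricter primitive if that matters.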
Example No. 16
class TestProxyData(TestData):

    def setup(self):
        create_link('dummyX', 'dummy')
        t_url = 'unix://\0%s' % (uuid.uuid4())
        p_url = 'unix://\0%s' % (uuid.uuid4())
        self.connect = Event()
        self.release = Event()

        target = Process(target=_run_remote_uplink,
                         args=(t_url, self.connect, self.release))
        target.daemon = True
        target.start()
        self.connect.wait()
        self.connect.clear()

        proxy = Process(target=_run_remote_uplink,
                        args=(p_url, self.connect, self.release))
        proxy.daemon = True
        proxy.start()
        self.connect.wait()
        self.connect.clear()

        self.ip = IPRoute(do_connect=False)
        link, proxy = self.ip.connect(p_url)
        self.ip.register('bala', proxy)
        link, host = self.ip.connect(t_url, addr=proxy)
        service = self.ip.discover(self.ip.default_target, addr=host)

        self.ip.default_peer = host
        self.ip.default_dport = service

        self.dev = self.ip.link_lookup(ifname='dummyX')
Example No. 17
    def _start_async_server(self):
        self.server = LiveSyncSocketServer(port=self.liveport)
        server_started_event = Event()
        self.server_process = Process(target=self.server.start, args=(server_started_event,))
        self.server_process.daemon = True
        self.server_process.start()
        server_started_event.wait(timeout=0.1)
        return server_started_event.is_set()
Example No. 18
    def start_server(self):
        #use a generic manager's shared dictionary to handle strings and ints
        manager = Manager()
        dictionary = manager.dict()
        dictionary["key"] = ''
        dictionary["finished chunks"] = 0

        # update is an event intended to be set by server to let the UI know that the shared dictionary has been updated.
        update = Event()
        update.clear()

        # shutdown is linked to the server/client shared shutdown command, setting this should shutdown server and clients.
        shutdown = Event()
        shutdown.clear()

        # shared is just an array, each shared variable is added to allow passing all shared values using one variable.
        shared = []
        shared.append(dictionary)
        shared.append(shutdown)
        shared.append(update)

        preset = 8
        if self.settings[preset]["single"] == "False":
            network = True
        else:
            network = False
        self.server = Process(target=Server, args=(self.settings[preset], shared))
        self.server.start()

        print "server pid: %i" % self.server.pid
        # this is a very simple example of how an update loop in the UI classes might work
        while not shutdown.is_set():
            # update is an Event, this means that a process can use the .wait() command to block until it is .set()
            update.wait(timeout=.5)
            print "%i/%i chunks completed." % (dictionary["finished chunks"], dictionary["total chunks"])
            print "Current word: %s" % dictionary["current word"]
            if preset > 8:
                print "%i hashes found." % dictionary["hashes found"]
            os.system('cls' if os.name == 'nt' else 'clear')
            if dictionary["key"] != '':
                print "Printing key from start_server method: " + dictionary["key"]
                break
            #print "%d chunks completed of %i total." % (dictionary["finished chunks"], dictionary["total chunks"])
            # after UI data has been updated, .clear() the update event so it will wait again in the next iteration
            update.clear()
        shutdown.set()
        time.sleep(1)

        #self.server.terminate()
        self.server.join()
        self.server.terminate()
        #os.kill(self.server.pid, signal.SIGKILL)
        self.server.join()
        manager.shutdown()
        if network:
            print self.server.pid
Example No. 19
    def test_multiple_service(self):
        from gaw import entrypoint

        server_start = Event()

        class A(object):
            name = 'A'

            @entrypoint
            def a(self):
                return 10

        class B(object):
            name = 'B'

            @entrypoint
            def b(self):
                return 20

        class Both(object):
            name = 'Both'

            @entrypoint
            def sum(self):
                client = GawClient(ip='localhost', port=4000)
                A = client.A
                B = client.B
                return A.a() + B.b()

        def server():
            GawServer(ip='0.0.0.0', port=4000).add(A).add(B).add(Both).run(lambda: server_start.set())

        def client():
            conn = GawClient(ip='localhost', port=4000)
            a = conn.A
            b = conn.B
            both = conn.Both

            self.assertEqual(a.a(), 10)
            self.assertEqual(b.b(), 20)
            self.assertEqual(both.sum(), 30)

        p = Process(target=server)
        try:
            p.start()

            server_start.wait()
            client()

            p.terminate()
            p.join()
        except Exception as e:
            # gracefully stop
            p.terminate()
            p.join()
            raise
Example No. 20
    def _test_remote(self, url):
        connect = Event()
        release = Event()
        target = Process(target=_run_remote_uplink,
                         args=(url, connect, release))
        target.daemon = True
        target.start()
        connect.wait()
        ip = IPRoute(host=url)
        ip.release()
        release.set()
Example No. 21
def main():
	# Setup process pooling
	manager = Manager()
	
	# Make queue for processes to grab a thread number from
	# also used so that the last worker knows it's last.
	thread_number = manager.Queue()
	[thread_number.put(t) for t in range(getthreads())]
	
	# Set event for handling worker ready condition
	workers_ready = Event()
	workers_idle = Event()
	workers_idle.set()

	# Set event for handling daemon quit condition for stuff that doesn't exit cleanly
	daemon_exit = Event()
	
	# Set event for handling new file condition
	new_file = Event()

	# Setup workqueue
	queue = manager.Queue()
	
	print("Starting daemons")
	watcherd = Process(name="watcherd", target=watcher, daemon=True, args=(queue, new_file,))
	watcherd.start()
	
	print("Daemons started.")
	
	print("Initializing workers...")
	workers = Pool(processes=thread_number.qsize(),
				initializer=initworker,
				initargs=(thread_number, workers_ready,))
	workers_ready.wait()
	print("All workers ready")
	
	# Set signal handler on main process
	def exit_handler(signum, frame):
		cleanup(workers, daemon_exit, workers_idle)
	signal(SIGINT, exit_handler)
	signal(SIGTERM, exit_handler)

	# Main loop
	while not daemon_exit.is_set():
		# wait until new file is on queue, forced check every 5 minutes.
		status = new_file.wait(timeout=300.0)
		if status: new_file.clear()
		workers_idle.clear()
		
		# Send to workers
		workers.map(compressors.dyncompress,[queue.get() for q in range(queue.qsize())])
		
		workers_idle.set()
Example No. 22
class EngineManager(ObjectsManager, Process):
    """Uses header values to handle directed requests and results."""

    def __init__(self, *args, **kwargs):
        super(EngineManager, self).__init__(*args, **kwargs)
        self.manager = Manager()
        self.results = self.manager.dict()
        self.block = Event()

    def retrieve(self, jid=None, inputs=False, timeout=None, block=True):
        """Retrieve a job given a jid"""
        if jid is None:
            jid = self.cjid
        if block:
            self.block.wait()
        hdr, func, args, kwargs, rvalue = self.results[jid]
        self.block.clear()
        if inputs:
            return func, args, kwargs, rvalue
        else:
            return rvalue

    def ready(self, jid):
        """Check if a job id is ready."""
        return jid in self.results

    def __getattr__(self, attr):
        """Call a method on the underlying threaded object"""

        def method(*args, **kwargs):
            """A threaded method"""
            jid = id((attr, args, kwargs))
            self.cjid = jid
            self.hdr["jid"] = jid
            self.input.put((self.hdr, attr, args, kwargs))
            return jid

        return method

    def start(self):
        """Start"""
        super(EngineManager, self).start()

    def run(self):
        """Run the subthread to move things off the output."""
        running = True
        while running:
            hdr, func, args, kwargs, rvalue = self.output.get()
            if "stop" in hdr:
                running = False
            else:
                self.results[hdr["jid"]] = (hdr, func, args, kwargs, rvalue)
                self.block.set()
Example No. 23
def processexecute(scenario_path, debug, nb_browser, launch_delay, keep_alive, random_time, headless):
    from multiprocessing import Process
    from multiprocessing import Event
    processes = []
    fire_synchro_end = Event()
    for process_number in xrange(0, nb_browser):
        fire_instanciation_end = Event()
        process = Process(target=doinprocess, args=(scenario_path, debug, process_number, launch_delay, keep_alive, random_time, headless, fire_instanciation_end, fire_synchro_end,), name='scenario-' + str(process_number))
        process.start()
        processes.append(process)
        fire_instanciation_end.wait()  # TODO: add timeout
    fire_synchro_end.set()
    for process in processes:
        process.join()
Example No. 24
    def test_lock_multiproc(self):
        e = Event()

        @asyncio.coroutine
        def do_async_lock():
            self.assertEqual(False, (yield from self.lock.coro_acquire(False)))
            self.assertEqual(True,
                             (yield from self.lock.coro_acquire(timeout=4)))

        p = Process(target=do_lock_acquire, args=(self.lock, e))
        p.start()
        e.wait()
        self.loop.run_until_complete(do_async_lock())
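
do_lock_acquire is defined elsewhere in the test module; a plausible sketch consistent with the assertions above is that the child acquires the lock, signals the event, holds the lock long enough for the non-blocking acquire to fail, then releases it before the 4 second timeout expires:

# Hypothetical module-level helper for the test above (module level so it can
# be pickled as a Process target); `lock` is assumed to behave like a normal
# multiprocessing.Lock inside the child.
import time

def do_lock_acquire(lock, event):
    lock.acquire()
    event.set()     # tell the parent that the lock is now held
    time.sleep(2)   # long enough for coro_acquire(False) to return False
    lock.release()  # released well before the 4 second timeout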
Example No. 25
class TestRemoteData(TestData):

    def setup(self):
        create_link('dummyX', 'dummy')
        url = 'unix://\0%s' % (uuid.uuid4())
        self.connect = Event()
        self.release = Event()
        target = Process(target=_run_remote_uplink,
                         args=(url, self.connect, self.release))
        target.daemon = True
        target.start()
        self.connect.wait()
        self.ip = IPRoute(host=url)
        self.dev = self.ip.link_lookup(ifname='dummyX')
Example No. 26
class ServerProc(object):
    def __init__(self):
        self.proc = None
        self.daemon = None
        self.stop = Event()

    def start(self, init_func, host, port, paths, routes, bind_address, config,
              ssl_config, **kwargs):
        self.proc = Process(target=self.create_daemon,
                            args=(init_func, host, port, paths, routes, bind_address,
                                  config, ssl_config),
                            kwargs=kwargs)
        self.proc.daemon = True
        self.proc.start()

    def create_daemon(self, init_func, host, port, paths, routes, bind_address,
                      config, ssl_config, **kwargs):
        try:
            self.daemon = init_func(host, port, paths, routes, bind_address, config,
                                    ssl_config, **kwargs)
        except socket.error:
            print("Socket error on port %s" % port, file=sys.stderr)
            raise
        except Exception:
            print(traceback.format_exc(), file=sys.stderr)
            raise

        if self.daemon:
            try:
                self.daemon.start(block=False)
                try:
                    self.stop.wait()
                except KeyboardInterrupt:
                    pass
            except Exception:
                print(traceback.format_exc(), file=sys.stderr)
                raise

    def wait(self):
        self.stop.set()
        self.proc.join()

    def kill(self):
        self.stop.set()
        self.proc.terminate()
        self.proc.join()

    def is_alive(self):
        return self.proc.is_alive()
Example No. 27
    def start_feedback(self, name, port):
        """Starts the given Feedback in a new process."""
        self.logger.debug("Starting new Process...",)
        if self.currentProc:
            self.logger.warning("Trying to start feedback but another one is still running. Killing the old one now and proceed.")
            self.stop_feedback()
        ipcReady = Event()
        self.currentProc = FeedbackProcess(self.pluginController.availablePlugins[name], name, ipcReady, port)
        self.currentProc.start()
        # Wait until the network from the Process is ready, this is necessary
        # since spawning a new process under Windows is very slow.
        self.logger.debug("Waiting for IPC channel to become ready...")
        ipcReady.wait()
        self.logger.debug("IPC channel ready.")
        self.logger.debug("Done starting process.")
Example No. 28
class ProcessWorker(Process, BaseWorker):
    def __init__(self, *args, **kwargs):
        Process.__init__(self)
        BaseWorker.__init__(self, *args, **kwargs)
        self.finish_event = Event()

    def start_work(self):
        self.start()

    def wait_work_finish(self):
        self.finish_event.wait()

    def run(self):
        self._internal_do_work()
        self.finish_event.set()
Example No. 29
class ServerProc(object):
    def __init__(self):
        self.proc = None
        self.daemon = None
        self.stop = Event()

    def start(self, init_func, host, paths, port, bind_hostname, external_config,
              ssl_config):
        self.proc = Process(target=self.create_daemon,
                            args=(init_func, host, paths, port, bind_hostname,
                                  external_config, ssl_config))
        self.proc.daemon = True
        self.proc.start()

    def create_daemon(self, init_func, host, paths, port, bind_hostname,
                      external_config, ssl_config):
        try:
            self.daemon = init_func(host, paths, port, bind_hostname, external_config,
                                    ssl_config)
        except socket.error:
            print >> sys.stderr, "Socket error on port %s" % port
            raise
        except:
            print >> sys.stderr, traceback.format_exc()
            raise

        if self.daemon:
            try:
                self.daemon.start(block=False)
                try:
                    self.stop.wait()
                except KeyboardInterrupt:
                    pass
            except:
                print >> sys.stderr, traceback.format_exc()
                raise

    def wait(self):
        self.stop.set()
        self.proc.join()

    def kill(self):
        self.stop.set()
        self.proc.terminate()
        self.proc.join()

    def is_alive(self):
        return self.proc.is_alive()
Example No. 30
    def test_get_put(self):
        q = aioprocessing.AioQueue()
        e = Event()
        val = 2

        @asyncio.coroutine
        def queue_put():
            yield from q.coro_put(val)

        p = Process(target=queue_get, args=(q, e))
        p.start()
        self.loop.run_until_complete(queue_put())
        e.wait()
        out = q.get()
        p.join()
        self.assertEqual(out, val)
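
queue_get is not shown above; a plausible sketch, given that the parent waits on the event and then reads the value back off the queue, is that the child echoes the item and signals when it is done:

# Hypothetical module-level helper for the test above.
def queue_get(q, event):
    val = q.get()  # receive what queue_put() placed on the queue
    q.put(val)     # echo it back so the parent can read it with q.get()
    event.set()    # unblock the parent's e.wait()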
Example No. 31
def render_frame(data_q: Queue, finish_e: Event, renderer_provider):
    renderer = renderer_provider()

    while True:
        try:
            old_state, action, new_state, reward, done = data_q.get(
                timeout=0.1)
            renderer.plot(old_state, action, new_state, reward, done)
            renderer.render()
        except Empty:
            renderer.render()
            if finish_e.wait(0.1):
                break

    print("shut down online rendering !!!")
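
A sketch of how render_frame() might be driven from the training process (not part of the original source); DummyRenderer is a hypothetical stand-in for whatever renderer_provider actually returns, and the loop above also needs `from queue import Empty` to be importable:

# Hypothetical driver: feed a few transitions through the queue, then set the
# finish event so the rendering process shuts down on its next empty poll.
from multiprocessing import Event, Process, Queue

class DummyRenderer:
    def plot(self, old_state, action, new_state, reward, done):
        print("transition:", old_state, action, new_state, reward, done)

    def render(self):
        pass  # a real renderer would redraw its window here

if __name__ == "__main__":
    data_q, finish_e = Queue(), Event()
    proc = Process(target=render_frame, args=(data_q, finish_e, DummyRenderer))
    proc.start()
    for step in range(3):
        data_q.put((step, 0, step + 1, 1.0, False))
    finish_e.set()
    proc.join()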
Example No. 32
    def test_deploy_package(self, os_mock):
        """
        Tests asynchronous deploy-package process.
        """

        package_callback_name = "package_callback"
        self.mock_config[package_callback_name] = package_callback_name
        self.mock_package_registar.package_exists = Mock(return_value=False)
        self.mock_repository.get_package.return_value = 'abcd'

        class MockDeploymentManager(DeploymentManager):
            def create_mocks(self):
                self._application_creator = Mock()
                self._package_parser = Mock()

        deployment_manager = self._initialize_deployment_manager(MockDeploymentManager)
        deployment_manager.create_mocks()
        deployment_manager.rest_client = Mock()
        # mock status interface:
        on_complete = Event()
        # holds the result of the test, false if test has not finished yet:
        test_result = [None]
        # listen for callbacks from the deployer:
        on_rest_callback = self._create_rest_callback_verifier(expected_statuses=[PackageDeploymentState.DEPLOYING, PackageDeploymentState.DEPLOYED],
                                                               on_complete=on_complete, test_result=test_result)
        deployment_manager.rest_client.post = on_rest_callback
        self.mock_application_registar.set_application_status(self.test_app_name, ApplicationState.STARTED)
        # launch the async test
        deployment_manager.deploy_package(self.test_package_name)
        # wait for the test to finish
        on_complete.wait(5)
        self.assertIsNotNone(test_result[0], "async task completed")
        info = test_result[0]
        # check that no error was reported:
        self.assertEquals(info.get("data")[0]["state"], PackageDeploymentState.DEPLOYED)
        self.assertFalse("Error deploying" in info.get("data")[0]["information"])
Example No. 33
    def start_feedback(self, name, port):
        """Starts the given Feedback in a new process.

        :param name: Feedback
        :type name: str
        :param port: Parallel Port

        """
        self.logger.debug("Starting new Process...", )
        if self.currentProc:
            self.logger.warning(
                "Trying to start feedback but another one is still running. Killing the old one now and proceed."
            )
            self.stop_feedback()
        ipcReady = Event()
        self.currentProc = FeedbackProcess(
            self.pluginController.availablePlugins[name], name, ipcReady, port)
        self.currentProc.start()
        # Wait until the network from the Process is ready, this is necessary
        # since spawning a new process under Windows is very slow.
        self.logger.debug("Waiting for IPC channel to become ready...")
        ipcReady.wait()
        self.logger.debug("IPC channel ready.")
        self.logger.debug("Done starting process.")
Example No. 34
    def receive(self):
        def _receive(dispatcher: Dispatcher, initialized: Event) -> None:
            for k, v in self._performers.items():
                event = Event()
                dispatcher._running_performers[k] = v.run(event)
                event.wait(
                    3)  # TODO: Do we want to configure this polling interval?

            initialized.set()

            while self._state == DispatcherState.ds_running:
                time.sleep(
                    5
                )  # yield to avoid spinning, between checking for changes to state

        if self._state == DispatcherState.ds_awaiting:
            initialized = Event()
            self._supervisor = Thread(target=_receive,
                                      args=(self, initialized))
            initialized.wait(
                5
            )  # TODO: Should this be number of performs and configured with related?
            self._state = DispatcherState.ds_running
            self._supervisor.start()
Example No. 35
def Trigger(tc):
    stop_ev = Event()
    err_ev = Event()
    fin_ev = Event()
    thr = Process(target=ahs_loop, args=(tc.RF, fin_ev, stop_ev, err_ev,))
    thr.start()
    err = False
    try:
        for _iter in range(tc.iterators.count):
            if err_ev.is_set() is True:
                raise RuntimeError('AHS error')
            api.Logger.info('Run: %d' % _iter)
            api.Logger.info("Issuing server restart")
            ret = api.IpmiNodes([tc.node_name], ipmiMethod='cycle', useNcsi=True)
            if ret != api.types.status.SUCCESS:
                api.Logger.info("server restart failed")
                return api.types.status.FAILURE
            api.Logger.info("server restart done")
            tc.test_node.WaitForHost()
        
        if err_ev.is_set() is True:
            raise RuntimeError('AHS error')
    except:
        api.Logger.error(traceback.format_exc())
        err = True
    finally:
        stop_ev.set()
        api.Logger.info("Waiting for AHS loop to complete")
        fin_ev.wait(timeout=10)
        thr.terminate()
        thr.join()
    
    if err:
        return api.types.status.FAILURE

    return api.types.status.SUCCESS
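
ahs_loop is defined elsewhere in the test; a hedged sketch of the event protocol it has to follow, based on how Trigger() uses the three events:

# Hypothetical sketch of the worker behind Trigger(): poll until the stop
# event is set, flag failures via err_ev, and signal completion via fin_ev.
def ahs_loop(rf, fin_ev, stop_ev, err_ev):
    try:
        while not stop_ev.wait(timeout=1):
            # The real loop would query AHS health through `rf` here and
            # raise on any problem it detects.
            pass
    except Exception:
        err_ev.set()   # Trigger() converts this into an 'AHS error'
    finally:
        fin_ev.set()   # Trigger() waits up to 10 s on this before terminating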
Example No. 36
class GuiIF():  # used to send signals to the gui engine
    def __init__(self, server_address=None, layout=layout):
        # Q_c2g carries either:
        #     1. the single string '_q_u_i_t_' -> quit flag
        #     2. a list [stimulus setting, marker] -> stimulus settings plus a marker flag

        self.Q_c2g = Queue()
        self.E_g2c = Event()
        self.layout = layout
        self.args = {
            'layout': self.layout,
            'Q_c2g': self.Q_c2g,
            'E_g2c': self.E_g2c,
            'server_address': server_address
        }

    def quit(self):
        self.Q_c2g.put('_q_u_i_t_')

    def wait(self):
        self.E_g2c.wait()

    def update(self, stimulus, marker):
        self.Q_c2g.put([stimulus, marker])
Example No. 37
class StopWatch(Process):
    def __init__(self, hotkey=None):
        super().__init__(name='Stop Watch')
        self._stop_event = Event()
        self._stop_event.clear()

        self._waiter = Event()
        self._waiter.clear()

        self._elapsed_time = Value('Q', 0)
        self._hotkey = hotkey or 'space'

        super().start()
        # Waits till the process is started.
        self._stop_event.wait()

    def start(self):
        if self.is_alive():
            self._stop_event.set()
        else:
            raise RuntimeError()

    def join(self, *args, forceStop=False, **kwarks):
        zeroTimer = not self._waiter.is_set()
        if forceStop and self.is_alive():
            self._waiter.set()
        super().join(*args, **kwarks)
        if zeroTimer:
            with self._elapsed_time.get_lock():
                self._elapsed_time.value = 0

    def getValue(self):
        return self._elapsed_time.value or None

    def run(self):
        s, e = None, None

        def hotkey_action():
            if s is not None:
                self._waiter.set()

        remove = add_hotkey(self._hotkey, hotkey_action)
        # Inform the main process that this process is started
        self._stop_event.set()
        # Clears the stop event so that process halts till it is started again with start function.
        self._stop_event.clear()
        self._stop_event.wait()

        s = timer()
        self._waiter.wait()
        e = timer()

        with self._elapsed_time.get_lock():
            self._elapsed_time.value = e - s
        remove_hotkey(remove)
Example No. 38
class ProcessPeer(Peer, Process):
    def __init__(
            self,
            host: Tuple[str, int],
            name: str,
            role: str,
            cert: Tuple[str, str],
            program_hash: str,
            ns: str = None,
            loop_delay: int = 1,
            auto_register: bool = False,
            logger: "logging.Logger" = getLogger(__name__),
    ):
        super().__init__(
            host=host,
            name=name,
            role=role,
            cert=cert,
            program_hash=program_hash,
            ns=ns,
            auto_register=auto_register,
            logger=logger,
        )
        self.loopDelay = loop_delay
        self.stopped = Event()
        self.started = Event()

    def is_start(self) -> bool:
        return self.started.is_set()

    def start(self) -> None:
        super().start()
        self.loop_start()
        self.started.set()

    def stop(self) -> None:
        self.loop_stop()
        self.stopped.set()
        self.started.clear()

    def run(self) -> None:
        while self.stopped.wait(
                self.loopDelay) is False or self.send_queue != {}:
            self.loop()
        self.loop_stop_post()
        sleep(2)
        self.logger.info("{} stopped.".format(self.server_info))
Example No. 39
class TestTagPropagation(unittest.TestCase):

    def setUp(self):
        self._tmp_dir = TemporaryDirectory()
        self._config = initialize_config(self._tmp_dir)
        self.analysis_finished_event = Event()
        self.uid_of_key_file = '530bf2f1203b789bfe054d3118ebd29a04013c587efd22235b3b9677cee21c0e_2048'

        self._mongo_server = MongoMgr(config=self._config, auth=False)
        self.backend_interface = BackEndDbInterface(config=self._config)

        self._analysis_scheduler = AnalysisScheduler(config=self._config, pre_analysis=self.backend_interface.add_object, post_analysis=self.count_analysis_finished_event)
        self._unpack_scheduler = UnpackingScheduler(config=self._config, post_unpack=self._analysis_scheduler.start_analysis_of_object)

    def count_analysis_finished_event(self, fw_object):
        self.backend_interface.add_analysis(fw_object)
        if fw_object.uid == self.uid_of_key_file and 'crypto_material' in fw_object.processed_analysis:
            sleep(1)
            self.analysis_finished_event.set()

    def tearDown(self):
        self._unpack_scheduler.shutdown()
        self._analysis_scheduler.shutdown()

        clean_test_database(self._config, get_database_names(self._config))
        self._mongo_server.shutdown()

        self._tmp_dir.cleanup()
        gc.collect()

    def test_run_analysis_with_tag(self):
        test_fw = Firmware(file_path='{}/container/with_key.7z'.format(get_test_data_dir()))
        test_fw.release_date = '2017-01-01'
        test_fw.scheduled_analysis = ['crypto_material']

        self._unpack_scheduler.add_task(test_fw)

        assert self.analysis_finished_event.wait(timeout=20)

        processed_fo = self.backend_interface.get_object(self.uid_of_key_file, analysis_filter=['crypto_material'])
        assert processed_fo.processed_analysis['crypto_material']['tags'], 'no tags set in analysis'

        processed_fw = self.backend_interface.get_object(test_fw.uid, analysis_filter=['crypto_material'])
        assert processed_fw.analysis_tags, 'tags not propagated properly'
        assert processed_fw.analysis_tags['crypto_material']['private_key_inside']
Example No. 40
class SafeEvent(object):
    __thread_pool = SingletonThreadPool()

    def __init__(self):
        self._event = ProcessEvent()

    def is_set(self):
        return self._event.is_set()

    def set(self):
        if not BackgroundMonitor.is_subprocess_enabled() or BackgroundMonitor.is_subprocess_alive():
            self._event.set()
        # SafeEvent.__thread_pool.get().apply_async(func=self._event.set, args=())

    def clear(self):
        return self._event.clear()

    def wait(self, timeout=None):
        return self._event.wait(timeout=timeout)
Example No. 41
    def __enter__(self):
        ready = Event()

        def target():
            async def callback_notify():
                # Run periodically by the Uvicorn server.
                ready.set()

            uvicorn.run(self.app,
                        callback_notify=callback_notify,
                        **self.kwargs)

        self._process = Process(target=target)
        self._process.start()

        if not ready.wait(self.ready_timeout):  # pragma: no cover
            raise TimeoutError(
                f"Live server not ready after {self.ready_timeout} seconds")

        return self
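
The matching __exit__ is not shown in this excerpt; a minimal sketch that tears the server process down when the context manager exits, using the attributes set up in __enter__ above:

    # Hypothetical counterpart to __enter__: stop the uvicorn server process.
    def __exit__(self, exc_type, exc_value, traceback):
        self._process.terminate()
        self._process.join()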
Example No. 42
def registerhook(hookconf):
    """Start and register a hook

    :param hookconf: A dict with the config of the hook to register
    """
    rtn = False
    hookcfg = dict(hookconf)
    hookclass = config.getHookType(hookcfg['hooktype'])
    hookqueue = Queue()
    hookname = hookcfg[config.HOOKNAME]
    hooklevel = hookcfg[config.LOGLEVEL]
    hookevent = Event()
    hookprocess = hookclass(hookconf,
                            logobject.getLoggerHandle(hookname, hooklevel),
                            hookqueue, hookevent)
    hookprocess.start()
    if hookevent.wait(2.0):
        registeredhooks.append((hookname, hookcfg, hookprocess, hookqueue))
        rtn = True
    else:
        loghandle.critical("Failed to start hook process {}".format(hookname))
        rtn = False
    return rtn
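
A hedged sketch of the contract a hook class has to satisfy for registerhook() above to succeed: it is constructed with the hook config, a logger handle, a queue and the ready event, and it must set that event within the two-second wait after start():

# Hypothetical minimal hook; the real hook classes come from
# config.getHookType() and do actual work with the queued items.
from multiprocessing import Process

class ExampleHook(Process):
    def __init__(self, hookconf, loghandle, hookqueue, hookevent):
        super(ExampleHook, self).__init__()
        self.hookconf = hookconf
        self.loghandle = loghandle
        self.hookqueue = hookqueue
        self.hookevent = hookevent

    def run(self):
        self.hookevent.set()  # signal registerhook() that the hook is up
        while True:
            item = self.hookqueue.get()
            if item is None:  # hypothetical shutdown sentinel
                break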
Example No. 43
def lock_and_call(callback, path_to_lock):
    """
    Grab a lock on path_to_lock from a foreign process then execute the callback.
    :param callable callback: object to call after acquiring the lock
    :param str path_to_lock: path to file or directory to lock
    """
    # Reload certbot.util module to reset internal _LOCKS dictionary.
    reload_module(util)

    emit_event = Event()
    receive_event = Event()
    process = Process(target=_handle_lock, args=(emit_event, receive_event, path_to_lock))
    process.start()

    # Wait confirmation that lock is acquired
    assert receive_event.wait(timeout=10), 'Timeout while waiting to acquire the lock.'
    # Execute the callback
    callback()
    # Trigger unlock from foreign process
    emit_event.set()

    # Wait for process termination
    process.join(timeout=10)
    assert process.exitcode == 0
Example No. 44
    def startProcess(self):
        if self._process:
            # This should almost never happen (but it does)
            # Make sure the previous process is killed before a new one is started
            self._logger.warn(
                "A previous process was still running, killing it")
            self._kill()

        onListeningEvent = Event()
        errorState = Value(
            'b',
            False)  #If True, it means the process had an error while starting
        self._listening = False

        self._process = Process(
            target=startPipelineProcess,
            args=(self._device, self._size, self._rotation, self._source,
                  self._encoding, onListeningEvent, errorState,
                  (self._parentConn, self._processConn),
                  settings().getInt(['camera', 'debug-level'])))
        self._process.daemon = True
        self._process.start()
        if onListeningEvent.wait(20.0):
            if errorState.value:
                self._logger.error('Pipeline Failed to start.')
                self._kill()
                self._logger.debug('Pipeline Process killed.')

            else:
                self._logger.debug('Pipeline Process Started.')
                self._listening = True

        else:
            self._logger.debug(
                'Timeout while waiting for pipeline process to start')
            self._kill()
Example No. 45
class TestFileAddition(unittest.TestCase):
    @patch('unpacker.unpack.FS_Organizer', MockFSOrganizer)
    def setUp(self):
        self._tmp_dir = TemporaryDirectory()
        self._config = initialize_config(self._tmp_dir)
        self.elements_finished_analyzing = Value('i', 0)
        self.analysis_finished_event = Event()
        self.compare_finished_event = Event()

        self._mongo_server = MongoMgr(config=self._config, auth=False)
        self.backend_interface = BackEndDbInterface(config=self._config)

        self._analysis_scheduler = AnalysisScheduler(
            config=self._config,
            post_analysis=self.count_analysis_finished_event)
        self._unpack_scheduler = UnpackingScheduler(
            config=self._config, post_unpack=self._analysis_scheduler.add_task)
        self._compare_scheduler = CompareScheduler(
            config=self._config, callback=self.trigger_compare_finished_event)

    def count_analysis_finished_event(self, fw_object):
        self.backend_interface.add_analysis(fw_object)
        self.elements_finished_analyzing.value += 1
        if self.elements_finished_analyzing.value == 4 * 2 * 2:  # 2 container with 3 files each and 2 plugins
            self.analysis_finished_event.set()

    def trigger_compare_finished_event(self):
        self.compare_finished_event.set()

    def tearDown(self):
        self._compare_scheduler.shutdown()
        self._unpack_scheduler.shutdown()
        self._analysis_scheduler.shutdown()

        clean_test_database(self._config, get_database_names(self._config))
        self._mongo_server.shutdown()

        self._tmp_dir.cleanup()
        gc.collect()

    def test_unpack_analyse_and_compare(self):
        test_fw_1 = Firmware(
            file_path='{}/container/test.zip'.format(get_test_data_dir()))
        test_fw_1.release_date = '2017-01-01'
        test_fw_2 = Firmware(
            file_path='{}/regression_one'.format(get_test_data_dir()))
        test_fw_2.release_date = '2017-01-01'

        self._unpack_scheduler.add_task(test_fw_1)
        self._unpack_scheduler.add_task(test_fw_2)

        self.analysis_finished_event.wait(timeout=20)

        compare_id = normalize_compare_id(';'.join(
            [fw.uid for fw in [test_fw_1, test_fw_2]]))

        self.assertIsNone(
            self._compare_scheduler.add_task((compare_id, False)),
            'adding compare task creates error')

        self.compare_finished_event.wait(timeout=10)

        with ConnectTo(CompareDbInterface, self._config) as sc:
            result = sc.get_compare_result(compare_id)

        self.assertEqual(result['plugins']['Software'],
                         self._expected_result()['Software'])
        self.assertCountEqual(
            result['plugins']['File_Coverage']['files_in_common'],
            self._expected_result()['File_Coverage']['files_in_common'])

    @staticmethod
    def _expected_result():
        return {
            'File_Coverage': {
                'files_in_common': {
                    'all': [],
                    'collapse': False
                }
            },
            'Software': {
                'Compare Skipped': {
                    'all':
                    'Required analysis not present: [\'software_components\', \'software_components\']'
                }
            }
        }
Example No. 46
class DataLogger(Process):
    def __init__(self, controlPipe, dataPipe, workerPipe):

        super(DataLogger, self).__init__()
        #threading.Thread.__init__(self)
        self.__logName = None  # File to log to
        self.__logFrequency = 1  # Time per second to write to the log file
        self.__logHeadings = []  # List of headings for the log file.
        self.__running = False  # Internal running flag
        self.__paused = True  # Internal paused flag
        self.__logPath = './'  # Path for logfile. Default to current directory
        self.daemon = True  # Sets the process to daemon. Stops if the parent process stops
        self.name = 'Logger'  # Sets the process name. Helps with debugging.
        self.__pipes = {}
        self.__pipes['APPLICATION'] = PipeWatcher(
            self, controlPipe, 'APP->LOGGER'
        )  # Communication pipe with the controlling application
        self.__pipes['DATA'] = PipeWatcher(
            self, dataPipe, 'COLLECTOR->LOGGER'
        )  # Communication pipe with the Data Collector process
        self.__pipes['WORKER'] = PipeWatcher(
            self, workerPipe, 'WORKER->LOGGER'
        )  # Communication pipe with the Worker process
        self.__data = dict()  # Data dictionary to use to write log files
        self.__refreshRequired = True  # Flag to determine if the dictionary needs to be refreshed
        self.__refreshRequested = False  # Flag to determine if the refresh request has been sent
        self.__logFormat = '%Y%m%d%H%M'  # Log file name format
        self.__pauseLog = False  # Flag to pause logging
        self.__pid = None  # Process ID of the Logging process
        self.__snapshot_ready = Event()

        logger.debug('Logging process initialised')

    def run(self):
        self.__running = True
        logger.info('Starting Logger process on PID {}'.format(self.pid))
        for p in self.__pipes:
            self.__pipes[p].start()
        timer = time()
        while self.__running:
            try:
                if not self.__paused:
                    if self.__logName is None:
                        logger.debug('Logger name not set. Pausing')
                        self.pause()
                    #logger.debug('Running: {}, Paused: {}, Required: {}, Requested: {}'.format(self.__running, self.__paused, self.__refreshRequired, self.__refreshRequested))
                    if self.__refreshRequired:
                        #logger.debug('Getting snapshot')
                        self.__pipes['DATA'].send(Message('SNAPSHOT'))
                        self.__snapshot_ready.wait()
                        self.__snapshot_ready.clear()
                    line = ''
                    #logger.debug('Recording data')
                    for l in self.__logHeadings:
                        if l in self.__data:
                            line += str(self.__data[l]['LOG']).strip() + ','
                        else:
                            #logger.debug('{} is not in snapshot'.format(l))
                            line += '-,'
                    with open(self.__logName + '.log', 'ab') as f:
                        f.write(bytes(line[:len(line) - 1] + '\n', 'UTF-8'))
                    self.__refreshRequired = True
                sleeptime = (1.0 / self.__logFrequency) - (time() - timer)
                if sleeptime < 0:
                    logger.warning(
                        'Logger sleep time reached zero. Consider reducing log frequency'
                    )
                    #self.logFrequency-=1
                else:
                    sleep(sleeptime)
                timer = time()
            except (KeyboardInterrupt, SystemExit):
                self.__running = False
                continue
            except:
                logger.critical(
                    'Unhandled exception occured in Logger process: ',
                    exc_info=True,
                    stack_info=True)
                continue
        logger.info('Logging process stopped')

    def stop(self, p=None):
        #Stop logging.    Thread stops - This is final.    Cannot be restarted
        self.__running = False

    def resume(self, p=None):
        #Resume Logging
        logger.info('Logging resumed')
        self.__paused = self.__pauseLog = False
        if self.__logName is None:
            self.__setName()

    def pause(self, p=None):
        #Pause logging, thread keeps running
        if not self.__paused:
            logger.info('Logging paused')
            self.__paused = True

    def frequency(self, p):
        self.__logFrequency = p['FREQUENCY']

    def logpath(self, p):
        self.__logPath = p['PATH']

    def logname(self, p):
        return Message('LOG_NAME', NAME=self.__logName)

    def snap_shot(self, p):
        self.__data = p['SNAPSHOT']
        self.__refreshRequired = False
        self.__snapshot_ready.set()

    def __setName(self):
        if self.__logHeadings == []:
            logger.warning(
                'No column headings have been set.    No log file started.')
        else:
            self.__logName = (self.__logPath +
                              datetime.now().strftime(self.__logFormat))
            logger.info('Logging started - output: {}.log'.format(
                self.__logName))
            line = ''
            for l in self.__logHeadings:
                line += l + ','
            with open(self.__logName + '.log',
                      'wb') as f:  # Clobber output file if it exists
                f.write(bytes(line[:len(line) - 1] + '\n', 'UTF-8'))

#    def headings(self, p):
#        #Set the log headings
#        self.__logHeadings = p['HEADINGS']

    def save(self, p=None):
        #Compress the logfile
        if not self.__paused:
            self.pause()
        if self.__logName is None:
            return
        try:
            with open(self.__logName + '.log', 'rb') as f:
                with gzip.open(self.__logName + '.log.gz', 'wb') as z:
                    shutil.copyfileobj(f, z)
            os.remove(self.__logName + '.log')
            self.__logName = None
        except:
            logger.error('Error compressing log file {}'.format(
                self.__logName),
                         exc_info=True)
            self.__logName = None

    def discard(self, p=None):
        #Delete the logfile
        if self.__paused:
            self.__paused = True
        try:
            logger.debug('Discarding log file {}.log'.format(self.__logName))
            os.remove(self.__logName + '.log')
        except:
            logger.warning('Could not delete log file {}.log'.format(
                self.__logName),
                           exc_info=True)
        self.__logName = None

    def getstatus(self, p=None):
        d = dict()
        d['Running'] = self.__running
        d['Paused'] = self.__paused
        d['Log Name'] = self.__logName
        d['Frequency'] = self.__logFrequency
        d['Log Path'] = self.__logPath
        d['Headings'] = self.__logHeadings
        return Message('LOGSTATUS', STATUS=d)

    def add_headings(self, p):
        insert = False
        if 'INSERT' in p:
            insert = p['INSERT']
        for a in range(len(p['HEADINGS'])):
            if p['HEADINGS'][a] not in self.__logHeadings:
                if insert:
                    self.__logHeadings.insert(a, p['HEADINGS'][a])
                else:
                    self.__logHeadings.append(p['HEADINGS'][a])
        logger.info('Log headings updated to {}'.format(self.__logHeadings))

    def remove_headings(self, p):
        for h in p['HEADINGS']:
            if h in self.__logHeadings:
                self.__logHeadings.remove(h)
        logger.info('Log headings updated to {}'.format(self.__logHeadings))
Example No. 47
class MultiProcessConsumer(Consumer):
    """
    A consumer implementation that consumes partitions for a topic in
    parallel using multiple processes

    client: a connected KafkaClient
    group: a name for this consumer, used for offset storage and must be unique
    topic: the topic to consume

    auto_commit: default True. Whether or not to auto commit the offsets
    auto_commit_every_n: default 100. How many messages to consume
                         before a commit
    auto_commit_every_t: default 5000. How much time (in milliseconds) to
                         wait before commit
    num_procs: Number of processes to start for consuming messages.
               The available partitions will be divided among these processes
    partitions_per_proc: Number of partitions to be allocated per process
               (overrides num_procs)

    Auto commit details:
    If both auto_commit_every_n and auto_commit_every_t are set, they will
    reset one another when one is triggered. These triggers simply call the
    commit method on this class. A manual call to commit will also reset
    these triggers
    """
    def __init__(self, client, group, topic, auto_commit=True,
                 auto_commit_every_n=AUTO_COMMIT_MSG_COUNT,
                 auto_commit_every_t=AUTO_COMMIT_INTERVAL,
                 num_procs=1, partitions_per_proc=0):

        # Initiate the base consumer class
        super(MultiProcessConsumer, self).__init__(client, group, topic,
                                    partitions=None,
                                    auto_commit=auto_commit,
                                    auto_commit_every_n=auto_commit_every_n,
                                    auto_commit_every_t=auto_commit_every_t)

        # Variables for managing and controlling the data flow from
        # consumer child process to master
        self.queue = Queue(1024)    # Child consumers dump messages into this
        self.start = Event()        # Indicates the consumers to start fetch
        self.exit = Event()         # Requests the consumers to shutdown
        self.pause = Event()        # Requests the consumers to pause fetch
        self.size = Value('i', 0)   # Indicator of number of messages to fetch

        partitions = self.offsets.keys()

        # If unspecified, start one consumer per partition
        # The logic below ensures that
        # * we do not cross the num_procs limit
        # * we have an even distribution of partitions among processes
        if not partitions_per_proc:
            partitions_per_proc = round(len(partitions) * 1.0 / num_procs)
            if partitions_per_proc < num_procs * 0.5:
                partitions_per_proc += 1

        # The final set of chunks
        chunker = lambda *x: [] + list(x)
        chunks = map(chunker, *[iter(partitions)] * int(partitions_per_proc))

        self.procs = []
        for chunk in chunks:
            chunk = filter(lambda x: x is not None, chunk)
            proc = Process(target=self._consume, args=(chunk,))
            proc.daemon = True
            proc.start()
            self.procs.append(proc)

    def _consume(self, partitions):
        """
        A child process worker which consumes messages based on the
        notifications given by the controller process
        """

        # Make the child processes open separate socket connections
        self.client.reinit()

        # We will start consumers without auto-commit. Auto-commit will be
        # done by the master controller process.
        consumer = SimpleConsumer(self.client, self.group, self.topic,
                                  partitions=partitions,
                                  auto_commit=False,
                                  auto_commit_every_n=None,
                                  auto_commit_every_t=None)

        # Ensure that the consumer provides the partition information
        consumer.provide_partition_info()

        while True:
            # Wait till the controller indicates us to start consumption
            self.start.wait()

            # If we are asked to quit, do so
            if self.exit.is_set():
                break

            # Consume messages and add them to the queue. If the controller
            # indicates a specific number of messages, follow that advice
            count = 0

            for partition, message in consumer:
                self.queue.put((partition, message))
                count += 1

                # We have reached the required size. The controller might have
                # more than it needs, so wait for a while.
                # Without this logic, we could run into a tight loop consuming
                # all available messages before the controller can reset the
                # 'start' event.
                if count == self.size.value:
                    self.pause.wait()
                    break

            # In case we did not receive any message, give up the CPU for
            # a while before we try again
            if count == 0:
                time.sleep(0.1)

        consumer.stop()

    def stop(self):
        # Set exit and start off all waiting consumers
        self.exit.set()
        self.pause.set()
        self.start.set()

        for proc in self.procs:
            proc.join()
            proc.terminate()

        super(MultiProcessConsumer, self).stop()

    def __iter__(self):
        """
        Iterator to consume the messages available on this consumer
        """
        # Trigger the consumer procs to start off.
        # We will iterate till there are no more messages available
        self.size.value = 0
        self.pause.set()

        while True:
            self.start.set()
            try:
                # We will block for a small while so that the consumers get
                # a chance to run and put some messages in the queue
                # TODO: This is a hack and will make the consumer block for
                # at least one second. Need to find a better way of doing this
                partition, message = self.queue.get(block=True, timeout=1)
            except Empty:
                break

            # Count, check and commit messages if necessary
            self.offsets[partition] = message.offset
            self.start.clear()
            yield message

            self.count_since_commit += 1
            self._auto_commit()

        self.start.clear()

    def get_messages(self, count=1, block=True, timeout=10):
        """
        Fetch the specified number of messages

        count: Indicates the maximum number of messages to be fetched
        block: If True, the API will block till some messages are fetched.
        timeout: If None, and block=True, the API will block infinitely.
                 If >0, API will block for specified time (in seconds)
        """
        messages = []

        # Give a size hint to the consumers. Each consumer process will fetch
        # a maximum of "count" messages. This will fetch more messages than
        # necessary, but these will not be committed to kafka. Also, the extra
        # messages can be provided in subsequent runs
        self.size.value = count
        self.pause.clear()

        while count > 0:
            # Trigger consumption only if the queue is empty
            # By doing this, we will ensure that consumers do not
            # go into overdrive and keep consuming thousands of
            # messages when the user might need only a few
            if self.queue.empty():
                self.start.set()

            try:
                partition, message = self.queue.get(block, timeout)
            except Empty:
                break

            messages.append(message)

            # Count, check and commit messages if necessary
            self.offsets[partition] = message.offset
            self.count_since_commit += 1
            self._auto_commit()
            count -= 1

        self.size.value = 0
        self.start.clear()
        self.pause.set()

        return messages
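A hedged usage sketch of the consumer above, assuming the legacy kafka-python API it is written against; the broker address, group and topic names are placeholders:

from kafka import KafkaClient

client = KafkaClient('localhost:9092')   # placeholder broker address
consumer = MultiProcessConsumer(client, 'my-group', 'my-topic', num_procs=2)

# Iterate until no more messages are available ...
for message in consumer:
    print(message)

# ... or fetch a bounded batch, then shut the child processes down.
batch = consumer.get_messages(count=10, block=True, timeout=5)
consumer.stop()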
Exemplo n.º 48
0
from multiprocessing import Event

e = Event()
print(e.is_set())

# print('1111111')
# e.wait()
#
# print('22222222')
# e.wait()

e.set()

print('333333')
print(e.is_set())
e.wait()  # returns immediately because the flag is set

print('444444')
e.clear()
print(e.is_set())
e.wait(1)  # the flag is now cleared, so wait() without a timeout would block forever

print(e.is_set())
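The snippet above only exercises the Event within a single process; a minimal sketch of the same flag actually shared with a worker process:

from multiprocessing import Event, Process
import time

def worker(start_evt):
    start_evt.wait()           # block until the parent sets the flag
    print('worker released')

if __name__ == '__main__':
    start_evt = Event()
    p = Process(target=worker, args=(start_evt,))
    p.start()
    time.sleep(0.5)            # pretend to do some setup work
    start_evt.set()            # release the worker
    p.join()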
Exemplo n.º 49
0
class ValkkaProcess(Process):
    """
    Semantics:

    Frontend: the part of the forked process that keeps running in the current (user) virtual memory space
    Backend : the part of the forked process that runs in its own virtual memory space (i.e. "in the background")

    This class has both backend and frontend methods:

    - Backend methods should only be called from the backend.  They are designated with a trailing "_".
    - Frontend methods should only be called from the frontend.

    The one exception to the naming rule is "run()", which has no trailing "_" but always runs in the backend.

    Frontend methods use a pipe to send a signal to the backend, which then handles the signal with the backend method of the same name (with a trailing "_").

    Backend methods can, in a similar fashion, send signals to the frontend through the pipe.  The frontend needs a listening thread that reads the pipe and calls
    handleSignal(), which dispatches to the matching frontend method.

    TODO: add the possibility to bind the process to a certain processor
    """

    # incoming signals : from frontend to backend
    incoming_signal_defs = {  # each key corresponds to a front- and backend methods
        "test_": {
            "test_int": int,
            "test_str": str
        },
        "stop_": []
    }

    # outgoing signals : from back to frontend.  Don't use same names as for
    # incoming signals ..
    outgoing_signal_defs = {
        "test_o": {
            "test_int": int,
            "test_str": str
        },
    }

    def __init__(self, name, affinity=-1, **kwargs):
        super().__init__()
        self.pre = self.__class__.__name__ + " : " + name + \
            " : "  # auxiliary string for debugging output
        self.name = name
        self.affinity = affinity
        self.signal_in = Event()
        self.signal_out = Event()
        # communications pipe.  Frontend uses self.pipe, backend self.childpipe
        self.pipe, self.childpipe = Pipe()

        self.signal_in.clear()
        self.signal_out.clear()

        # print(self.pre, "init")

    def getPipe(self):
        """Returns communication pipe for front-end
        """
        return self.pipe

    def preRun_(self):
        """After the fork, but before starting the process loop
        """
        if (self.affinity > -1):
            os.system("taskset -p -c %d %d" % (self.affinity, os.getpid()))

    def postRun_(self):
        """Just before process exit
        """
        print(self.pre, "post: bye!")

    def cycle_(self):
        # Do whatever your process should be doing, remember timeout every now
        # and then
        time.sleep(5)
        print(self.pre, "hello!")

    def startAsThread(self):
        from threading import Thread
        t = Thread(target=self.run)
        t.start()

    def run(self):  # No "_" in the name but, nevertheless, runs in the backend
        """After the fork. Now the process starts running
        """
        # print(self.pre," ==> run")

        self.preRun_()
        self.running = True

        while (self.running):
            self.cycle_()
            self.handleSignal_()

        self.postRun_()

    def handleSignal_(self):
        """Signals handling in the backend
        """
        if (self.signal_in.is_set()):
            signal_dic = self.childpipe.recv()
            method_name = signal_dic.pop("name")
            method = getattr(self, method_name)
            method(**signal_dic)
            self.signal_in.clear()
            self.signal_out.set()

    def sendSignal(self, **kwargs
                   ):  # sendSignal(name="test",test_int=1,test_str="kokkelis")
        """Incoming signals: this is used by frontend methods to send signals to the backend
        """
        try:
            name = kwargs.pop("name")
        except KeyError:
            raise (AttributeError("Signal name missing"))

        # a dictionary: {"parameter_name" : parameter_type}
        model = self.incoming_signal_defs[name]

        for key in kwargs:
            # raises error if user is using undefined signal
            model_type = model[key]
            parameter_type = kwargs[key].__class__
            if (model_type == parameter_type):
                pass
            else:
                raise (AttributeError("Wrong type for parameter " + str(key)))

        kwargs["name"] = name

        self.pipe.send(kwargs)
        self.signal_out.clear()
        self.signal_in.set()  # indicate that there is a signal
        self.signal_out.wait()  # wait for the backend to clear the signal

    def handleSignal(self, signal_dic):
        """Signal handling in the frontend
        """
        method_name = signal_dic.pop("name")
        method = getattr(self, method_name)
        method(**signal_dic)

    def sendSignal_(self, **kwargs):  # sendSignal_(name="test_out",..)
        """Outgoing signals: signals from backend to frontend
        """
        try:
            name = kwargs.pop("name")
        except KeyError:
            raise (AttributeError("Signal name missing"))

        # a dictionary: {"parameter_name" : parameter_type}
        model = self.outgoing_signal_defs[name]

        for key in kwargs:
            # raises error if user is using undefined signal
            try:
                model_type = model[key]
            except KeyError:
                print("your outgoing_signal_defs for", name, "is:", model)
                print("you requested key:", key)
                raise
            parameter_type = kwargs[key].__class__
            if (model_type == parameter_type):
                pass
            else:
                raise (AttributeError("Wrong type for parameter " + str(key)))

        kwargs["name"] = name

        self.childpipe.send(kwargs)

    # *** backend methods corresponding to each incoming signals ***

    def stop_(self):
        self.running = False

    def test_(self, test_int=0, test_str="nada"):
        print(self.pre, "test_ signal received with", test_int, test_str)

    # ** frontend methods corresponding to each incoming signal: these communicate with the backend via pipes **

    def stop(self):
        self.sendSignal(name="stop_")

    def test(self, **kwargs):
        dictionaryCheck(self.incoming_signal_defs["test_"], kwargs)
        kwargs["name"] = "test_"
        self.sendSignal(**kwargs)

    # ** frontend methods corresponding to each outgoing signal **

    # typically, there is a QThread in the frontend-side reading the process pipe
    # the QThread reads kwargs dictionary from the pipe, say
    # {"name":"test_o", "test_str":"eka", "test_int":1}
    # And calls handleSignal(kwargs)

    def test_o(self, **kwargs):
        pass
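A hedged usage sketch of the frontend/backend signalling above; the process name and the test values are arbitrary:

p = ValkkaProcess(name='example')
p.start()                                 # fork; run() starts cycling in the backend
p.test(test_int=1, test_str='kokkelis')   # frontend -> backend signal via the pipe
p.stop()                                  # sends 'stop_' so run() exits its loop
p.join()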
Exemplo n.º 50
0
class objrThread(threading.Thread):
    dl_detector = None

    def __init__(self, filePath, update_signal, finish_signal):
        super(objrThread, self).__init__()
        self.isWorking = False
        self.update_signal = update_signal
        self.finish_signal = finish_signal
        self.model_path = 'model_data/trained_weights_final.h5'  # model path or trained weights path
        self.classes_path = 'model_data/newclothclass.txt'
        self.anchors_path = 'model_data/yolo_anchors.txt'

        classes_path = os.path.expanduser(self.classes_path)
        with open(classes_path) as f:
            class_names = f.readlines()
        self.class_names = [c.strip() for c in class_names]

        # hsv_tuples = [(x / len(self.class_names), 1., 1.)
        #               for x in range(len(self.class_names))]
        # self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
        # self.colors = list(
        #     map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
        #         self.colors))
        # np.random.seed(10101)  # Fixed seed for consistent colors across runs.
        # np.random.shuffle(self.colors)  # Shuffle colors to decorrelate adjacent classes.
        # np.random.seed(None)  # Reset seed to default

        self.colors = [(0, 0, 255), (0, 255, 127), (173, 222, 255),
                       (18, 153, 255), (214, 112, 218), (255, 153, 18),
                       (255, 0, 0)]

        self.lastepochFlag = False
        self.isWorking = True

        if filePath == "":
            self.filePath = Manager().Value(
                c_char_p, "E:/YJF/hotelvideo/video17.MP4")  # 共享字符串变量
        else:
            self.filePath = Manager().Value(c_char_p, filePath)  # shared string variable

        self.stopMainProcSignal = Event()

        self.input_queue = mpQueue()
        self.camereStartSignal = Event()
        self.camereNotPauseSignal = Event()
        self.camereStopSignal = Event()
        self.camereNewinputSignal = Event()
        self.camereInputendSignal = Event()
        self.detDoneSignal = Event()
        self.newRltSignal = Event()
        self.cameraProc = CameraProc(
            self.filePath, self.input_queue, self.camereStartSignal,
            self.camereNotPauseSignal, self.camereStopSignal,
            self.camereNewinputSignal, self.camereInputendSignal,
            self.detDoneSignal, self.newRltSignal)

        self.rlt_queue = mpQueue()
        self.detproc = DetectorProc(self.input_queue, self.rlt_queue,
                                    self.camereNewinputSignal,
                                    self.newRltSignal, self.detDoneSignal,
                                    self.model_path, self.classes_path,
                                    self.anchors_path)

        self.areaClass = ['toilet', 'sink', 'desktop']

        self.errdict = {}
        self.corrList = []
        self.corrDict = {}

        self.testTotalError = []
        self.testTotalCorr = []
        self.testDelError = []
        self.testDelCorr = []
        self.f = open('test.txt', 'a')
        # self.f = open('test.txt')

    def _run(self):
        while True:
            print('run in sub_thread')
            time.sleep(1)

    def stopMainThread(self):
        self.isWorking = False
        self.cameraProc.terminate()
        self.detproc.terminate()
        self.newRltSignal.set()
        self.stopMainProcSignal.set()
        print('stop')

    def Pause(self):
        self.camereNotPauseSignal.clear()

    def breakPause(self):
        self.camereNotPauseSignal.set()

    def startCamera(self, filePath):
        self.filePath.value = filePath
        self.filename = filePath.split('/')[-1].split('.')[0]
        self.f = open('./rlt_txt/' + self.filename + '.txt', 'a')
        self.camereInputendSignal.clear()
        self.camereStopSignal.clear()
        self.camereNotPauseSignal.set()
        self.detDoneSignal.set()
        self.camereStartSignal.set()

    def stopCamera(self):
        self.camereStartSignal.clear()
        self.camereStopSignal.set()

    def setFilePath(self, filePath):
        self.filePath.value = filePath

    def label_match(self, arealabel, clothlabel):
        if (arealabel == 'sink' and clothlabel == 'lightblue') or \
            (arealabel == 'toilet' and clothlabel == 'pink') or \
            (arealabel == 'desktop' and clothlabel == 'orange'):
            return True
        else:
            return False

    def bbox_inter_area(self, box1, box2):
        # Returns the intersection area (not the IoU) of box1 with each box in box2. box1 is 4, box2 is nx4
        box2 = box2.transpose()

        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]

        # Intersection area
        inter_area = np.maximum(np.minimum(b2_x2 , np.full(b2_x2.shape, b1_x2)) - np.maximum(b2_x1 , np.full(b2_x1.shape, b1_x1)), 0) * \
                     np.maximum(np.minimum(b2_y2 , np.full(b2_y2.shape, b1_y2)) - np.maximum(b2_y1 ,  np.full(b2_y1.shape, b1_y1)), 0)
        return inter_area

    def run(self):
        self.detproc.daemon = True
        self.cameraProc.daemon = True
        self.cameraProc.start()
        self.detproc.start()

        # showcount = 0

        while self.isWorking:

            if self.camereInputendSignal.is_set():
                self.camereInputendSignal.clear()
                self.finish_signal.emit()
                self.f.close()

            self.newRltSignal.wait()
            while not self.rlt_queue.empty():

                # showcount += 1

                out_boxes, out_scores, out_classes, image, frameIndex = self.rlt_queue.get(
                )

                tmp = cv2.cvtColor(np.asarray(image), cv2.COLOR_BGR2RGB)
                image = Image.fromarray(tmp)

                # for  i in range(out_boxes.shape[0]):
                #     for j in range(4):
                #         self.f.write(str(out_boxes[i][j]))
                #         if not (i == out_boxes.shape[0]-1 and j==3):
                #             self.f.write(',')
                # self.f.write(';')
                # for i in range(out_scores.shape[0]):
                #     self.f.write(str(out_scores[i]))
                #     if i <out_scores.shape[0]-1:
                #         self.f.write(',')
                # self.f.write(';')
                # for i in range(out_classes.shape[0]):
                #     self.f.write(str(out_classes[i]))
                #     if i <out_classes.shape[0]-1:
                #         self.f.write(',')
                # self.f.write('\n')

                # line = self.f.readline().split(';')
                # tmp = []
                # for item in line[0].split(','):
                #     tmp.append(float(item))
                # boxs = np.array(tmp).reshape((-1, 4))
                #
                # tmp =[]
                # for item in line[1].split(','):
                #     tmp.append(float(item))
                # scores = np.array(tmp)
                #
                # tmp = []
                # for item in line[2].split(','):
                #     tmp.append(int(item))
                # classes = np.array(tmp)

                font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
                                          size=np.floor(3e-2 * image.size[1] +
                                                        0.5).astype('int32'))
                thickness = (image.size[0] + image.size[1]) // 300

                cloths_box = np.empty(shape=[0, 4])
                areas_box = np.empty(shape=[0, 4])

                cloths_label = []
                areas_label = []

                for i, c in reversed(list(enumerate(out_classes))):
                    predicted_class = self.class_names[c]
                    box = out_boxes[i]
                    score = out_scores[i]

                    label = '{} {:.2f}'.format(predicted_class, score)
                    draw = ImageDraw.Draw(image)
                    label_size = draw.textsize(label, font)
                    top, left, bottom, right = box
                    top = max(0, np.floor(top + 0.5).astype('int32'))
                    left = max(0, np.floor(left + 0.5).astype('int32'))
                    bottom = min(image.size[1],
                                 np.floor(bottom + 0.5).astype('int32'))
                    right = min(image.size[0],
                                np.floor(right + 0.5).astype('int32'))

                    if predicted_class in self.areaClass:
                        areas_box = np.append(areas_box,
                                              [[left, top, right, bottom]],
                                              axis=0)
                        areas_label.append(predicted_class)
                    else:
                        cloths_box = np.append(cloths_box,
                                               [[left, top, right, bottom]],
                                               axis=0)
                        cloths_label.append(predicted_class)

                    if top - label_size[1] >= 0:
                        text_origin = np.array([left, top - label_size[1]])
                    else:
                        text_origin = np.array([left, top + 1])

                    # My kingdom for a good redistributable image drawing library.
                    for i in range(thickness):
                        draw.rectangle(
                            [left + i, top + i, right - i, bottom - i],
                            outline=self.colors[c])
                    draw.rectangle(
                        [tuple(text_origin),
                         tuple(text_origin + label_size)],
                        fill=self.colors[c])
                    draw.text(text_origin, label, fill=(0, 0, 0), font=font)
                    del draw

                # tmp = cv2.cvtColor(np.asarray(image), cv2.COLOR_BGR2RGB)
                # res = cv2.resize(tmp,None, fx = 0.5, fy=0.5)
                # cv2.imshow('rlt', res)
                # if cv2.waitKey(1) & 0xFF == ord('q'):
                #     break
                # image.show()
                '''
                areas_box = np.append(areas_box, [[10, 10, 20, 20]], axis=0)
                areas_box = np.append(areas_box, [[50, 50, 100, 100]], axis=0)
                areas_label.append('clot')
                areas_label.append('clot')

                cloths_box = np.append(cloths_box, [[15, 15, 30, 30]], axis=0)
                cloths_box = np.append(cloths_box, [[15, 15, 60, 60]], axis=0)
                cloths_label.append('clot')
                cloths_label.append('clot1')
                '''

                if len(areas_label) and len(cloths_label):
                    for i, areas in enumerate(areas_box):
                        inter_area = self.bbox_inter_area(areas, cloths_box)
                        for clothIndex in np.nonzero(inter_area)[0]:
                            if not self.label_match(areas_label[i],
                                                    cloths_label[clothIndex]):
                                errkey = '{}-{}'.format(
                                    areas_label[i], cloths_label[clothIndex])
                                if errkey in self.errdict.keys():
                                    self.errdict[errkey][0] += 1
                                    self.errdict[errkey][2] = frameIndex
                                else:
                                    self.errdict[errkey] = [
                                        1, frameIndex, frameIndex, errkey
                                    ]
                            else:
                                if areas_label[i] in self.corrDict.keys():
                                    self.corrDict[areas_label[i]][0] += 1
                                    self.corrDict[
                                        areas_label[i]][2] = frameIndex
                                else:
                                    self.corrDict[areas_label[i]] = [
                                        1, frameIndex, frameIndex,
                                        areas_label[i]
                                    ]

                                if areas_label[i] not in self.corrList:
                                    self.corrList.append(areas_label[i])

                # print(self.errdict)
                notmatchList = []
                keys = list(self.errdict.keys())
                for key in keys:
                    if frameIndex - self.errdict[key][2] > 60:
                        if self.errdict[key][0] > 10:
                            notmatchList.append(self.errdict[key])
                            # self.testTotalError[key] = self.errdict[key]
                            self.testTotalError.append(self.errdict[key])
                            del (self.errdict[key])
                        else:
                            # self.testDelError [key] = self.errdict[key]
                            self.testDelError.append(self.errdict[key])
                            del (self.errdict[key])

                matchList = []
                corrKeys = list(self.corrDict.keys())
                for key in corrKeys:
                    if frameIndex - self.corrDict[key][2] > 60:
                        if self.corrDict[key][0] > 10:
                            # self.testTotalCorr[key] = self.corrDict[key]
                            matchList.append(self.corrDict[key])
                            self.testTotalCorr.append(self.corrDict[key])
                            del (self.corrDict[key])
                        else:
                            self.testDelCorr.append(self.corrDict[key])
                            del (self.corrDict[key])

                self.update_signal.emit(np.asarray(image))
                # print ('emit cout {}'.format(showcount))
                # print(notmatchList)

            self.newRltSignal.clear()

            if self.stopMainProcSignal.is_set():
                break
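bbox_inter_area() above returns the raw intersection area between one box and an array of boxes. A small self-contained check of that computation, using made-up boxes in (x1, y1, x2, y2) format:

import numpy as np

def inter_area(box1, boxes):
    # Intersection area of one box against each row of boxes (nx4).
    x1, y1, x2, y2 = boxes.T
    w = np.maximum(np.minimum(x2, box1[2]) - np.maximum(x1, box1[0]), 0)
    h = np.maximum(np.minimum(y2, box1[3]) - np.maximum(y1, box1[1]), 0)
    return w * h

boxes = np.array([[15, 15, 30, 30], [50, 50, 100, 100]], dtype=float)
print(inter_area(np.array([10, 10, 20, 20], dtype=float), boxes))  # -> [25.  0.]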
Exemplo n.º 51
0
from multiprocessing import Event

# create an Event object
e = Event()

print(e.is_set())

e.set()

e.wait(5)
print("*************")
print(e.is_set())

e.clear()  # clear the flag
print(e.is_set())
e.wait(1)  # the flag is cleared, so wait() without a timeout would block forever
Exemplo n.º 52
0
class Trigger(Process):
    """ Stytra uses
    :class:`Trigger <stytra.triggering.Trigger.__init__()>` objects  to control
    the beginning of a stimulation protocol via an external event.
    In the most obvious case, the signal is sent by
    an acquisition device such as a microscope to synchronize data acquisition
    and stimulation.
    The trigger has a check_trigger function that is called repeatedly in a while
    loop inside run(). When :meth:`Trigger <stytra.triggering.Trigger.check_trigger()>`
    returns True, the start_event is set. The Experiment class, if it has a trigger
    assigned, waits for the start_event to be set before starting. The check performed
    in check_trigger() is defined in subclasses to reflect the condition that should
    start the protocol.

    **Events**

    start_event:
        event that is set when check_trigger() returns True. It
        is used by stytra to control the beginning of the protocol;

    kill_event:
        can be set to kill the Trigger process;


    **Output Queues**

    queue_trigger_params:
        can be used to send to the Experiment data about
        the triggering event or device. For example, if triggering happens from
        a microscope via a ZMQ message, setting of the microscope can be sent in
        that message to be saved together with experiment metadata.



    """
    def __init__(self):
        super().__init__()

        self.start_event = Event()
        self.t = datetime.datetime.now()
        self.kill_event = Event()
        self.queue_trigger_params = Queue()

    def check_trigger(self):
        """ Check condition required for triggering to happen. Implemented in
        subclasses.

        Returns
        -------
        bool
            True if triggering condition is satisfied (e.g., message received);
            False otherwise.

        """
        return False

    def run(self):
        """ In this process, we constantly invoke the check_trigger class to control
        if start_event has to be set. Once it has been set, we wait an
        arbitrary time (0.1 s now) and then we clear it to be set again.
        """
        TIME_START_EVENT_ON = 0.1
        while True:
            self.kill_event.wait(0.0001)
            if self.kill_event.is_set():
                break

            if self.start_event.is_set():
                # Keep the signal on for at least 0.1 s
                time.sleep(TIME_START_EVENT_ON)
                self.start_event.clear()
                if self.start_event.is_set():
                    print(
                        "Trying to start when the start event is already set")

            if self.check_trigger():
                print("Trigger signal received")
                self.start_event.set()
                self.t = datetime.datetime.now()
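check_trigger() is meant to be overridden. A hedged sketch of a minimal subclass that fires once a fixed delay has elapsed; the delay-based condition is illustrative and not part of stytra:

class DelayTrigger(Trigger):
    def __init__(self, delay_s=5.0):
        super().__init__()
        self.delay_s = delay_s
        self.t0 = datetime.datetime.now()

    def check_trigger(self):
        # Fire once the configured delay has elapsed since construction.
        return (datetime.datetime.now() - self.t0).total_seconds() >= self.delay_s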
Exemplo n.º 53
0
class TestAcceptanceCompareFirmwares(TestAcceptanceBase):
    def setUp(self):
        super().setUp()
        self.analysis_finished_event = Event()
        self.compare_finished_event = Event()
        self.elements_finished_analyzing = Value('i', 0)
        self.db_backend_service = BackEndDbInterface(config=self.config)
        self._start_backend(post_analysis=self._analysis_callback,
                            compare_callback=self._compare_callback)
        time.sleep(2)  # wait for systems to start

    def tearDown(self):
        self._stop_backend()
        self.db_backend_service.shutdown()
        super().tearDown()

    def _analysis_callback(self, fo):
        self.db_backend_service.add_object(fo)
        self.elements_finished_analyzing.value += 1
        if self.elements_finished_analyzing.value == 4 * 2 * 2:  # two firmware containers with 3 included files each, times two plugins
            self.analysis_finished_event.set()

    def _compare_callback(self):
        self.compare_finished_event.set()

    def _upload_firmware_get(self):
        rv = self.test_client.get('/upload')
        self.assertIn(b'<h2>Upload Firmware</h2>', rv.data,
                      'upload page not displayed correctly')

    def _upload_firmware_put(self, path, device_name, uid):
        testfile_path = os.path.join(get_test_data_dir(), path)
        with open(testfile_path, 'rb') as fp:
            data = {
                'file': fp,
                'device_name': device_name,
                'device_part': 'full',
                'device_class': 'test_class',
                'version': '1.0',
                'vendor': 'test_vendor',
                'release_date': '01.01.1970',
                'tags': '',
                'analysis_systems': []
            }
            rv = self.test_client.post('/upload',
                                       content_type='multipart/form-data',
                                       data=data,
                                       follow_redirects=True)
        self.assertIn(b'Upload Successful', rv.data, 'upload not successful')
        self.assertIn(uid.encode(), rv.data,
                      'uid not found on upload success page')

    def _add_firmwares_to_compare(self):
        rv = self.test_client.get('/analysis/{}'.format(self.test_fw_a.uid))
        self.assertIn(self.test_fw_a.uid, rv.data.decode(), '')
        rv = self.test_client.get('/comparison/add/{}'.format(
            self.test_fw_a.uid),
                                  follow_redirects=True)
        self.assertIn('Firmwares Selected for Comparison', rv.data.decode())

        rv = self.test_client.get('/analysis/{}'.format(self.test_fw_c.uid))
        self.assertIn(self.test_fw_c.uid, rv.data.decode())
        self.assertIn(self.test_fw_c.name, rv.data.decode())
        rv = self.test_client.get('/comparison/add/{}'.format(
            self.test_fw_c.uid),
                                  follow_redirects=True)
        self.assertIn('Remove All', rv.data.decode())

    def _start_compare(self):
        rv = self.test_client.get('/compare', follow_redirects=True)
        self.assertIn(b'Your compare task is in progress.', rv.data,
                      'compare wait page not displayed correctly')

    def _show_comparison_results(self):
        rv = self.test_client.get('/compare/{};{}'.format(
            self.test_fw_a.uid, self.test_fw_c.uid))
        self.assertIn(self.test_fw_a.name.encode(), rv.data,
                      'test firmware a comparison not displayed correctly')
        self.assertIn(self.test_fw_c.name.encode(), rv.data,
                      'test firmware b comparison not displayed correctly')
        self.assertIn(b'File Coverage', rv.data,
                      'comparison page not displayed correctly')

    def _show_home_page(self):
        rv = self.test_client.get('/')
        self.assertIn(b'Latest Comparisons', rv.data,
                      'latest comparisons not displayed on "home"')

    def _show_compare_browse(self):
        rv = self.test_client.get('/database/browse_compare')
        self.assertIn(self.test_fw_a.name.encode(), rv.data,
                      'no compare result shown in browse')

    def _show_analysis_without_compare_list(self):
        rv = self.test_client.get('/analysis/{}'.format(self.test_fw_a.uid))
        assert b'Show List of Known Comparisons' not in rv.data

    def _show_analysis_with_compare_list(self):
        rv = self.test_client.get('/analysis/{}'.format(self.test_fw_a.uid))
        assert b'Show List of Known Comparisons' in rv.data

    def test_compare_firmwares(self):
        self._upload_firmware_get()
        for fw in [self.test_fw_a, self.test_fw_c]:
            self._upload_firmware_put(fw.path, fw.name, fw.uid)
        self.analysis_finished_event.wait(timeout=20)
        self._show_analysis_without_compare_list()
        self._add_firmwares_to_compare()
        self._start_compare()
        self.compare_finished_event.wait(timeout=20)
        self._show_comparison_results()
        self._show_home_page()
        self._show_compare_browse()
        self._show_analysis_with_compare_list()
Exemplo n.º 54
0
class Core(Process):

    __SESSIONFILENAME = 'session.ini'
    __CONFIGFILENAME = 'config.ini'
    DEFAULT_LANGUAGE = 'english'
    DEFAULT_USERNAME = '******'
    DEFAULT_PASSWORD = '******'
    DEFAULT_STORAGEDIRNAME = 'downloads'
    DEFAULT_LOGDIRNAME = 'logs'
    DEFAULT_LOGFILENAME = 'log.txt'

    def _init_consolelogger(self):
        if self.config.get('log', 'color_console') and ismodule('colorlog'):
            fmt = "%(label)s %(levelname)-8s %(reset)s %(log_color)s%(asctime)s  %(message)s"
            datefmt = "%Y-%m-%d  %H:%M:%S"
            primary_colors = {
                'DEBUG': "bold,cyan",
                'WARNING': "bold,yellow",
                'ERROR': "bold,red",
                'CRITICAL': "bold,purple",
            }
            secondary_colors = {
                'label': {
                    'DEBUG': "bold,white,bg_cyan",
                    'INFO': "bold,white,bg_green",
                    'WARNING': "bold,white,bg_yellow",
                    'ERROR': "bold,white,bg_red",
                    'CRITICAL': "bold,white,bg_purple",
                }
            }
            consoleform = colorlog.ColoredFormatter(
                fmt,
                datefmt,
                primary_colors,
                secondary_log_colors=secondary_colors)
        else:
            fmt = "%(asctime)s  %(levelname)-8s  %(message)s"
            datefmt = "%Y-%m-%d %H:%M:%S"
            consoleform = logging.Formatter(fmt, datefmt)

        consolehdlr = logging.StreamHandler(sys.stdout)
        consolehdlr.setFormatter(consoleform)
        self.log.addHandler(consolehdlr)

    def _init_syslogger(self):
        # try to mimic to normal syslog messages
        fmt = "%(asctime)s %(name)s: %(message)s"
        datefmt = "%b %e %H:%M:%S"
        syslogform = logging.Formatter(fmt, datefmt)
        syslogaddr = None

        syslog = self.config.get('log', 'syslog')
        if syslog == 'remote':
            syslog_host = self.config.get('log', 'syslog_host')
            syslog_port = self.config.get('log', 'syslog_port')
            syslogaddr = (syslog_host, syslog_port)
        else:
            syslog_folder = self.config.get('log', 'syslog_folder')
            if syslog_folder:
                syslogaddr = syslog_folder
            elif sys.platform == 'darwin':
                syslogaddr = '/var/run/syslog'
            elif os.name != 'nt':
                syslogaddr = '/dev/log'

        sysloghdlr = logging.handlers.SysLogHandler(syslogaddr)
        sysloghdlr.setFormatter(syslogform)
        self.log.addHandler(sysloghdlr)

    def _init_filelogger(self):
        fmt = "%(asctime)s  %(levelname)-8s  %(message)s"
        datefmt = "%Y-%m-%d %H:%M:%S"
        fileform = logging.Formatter(fmt, datefmt)

        logfile_folder = self.config.get('log', 'logfile_folder')
        if not logfile_folder:
            logfile_folder = self.DEFAULT_LOGDIRNAME
        makedirs(logfile_folder, exist_ok=True)

        logfile_name = self.config.get('log', 'logfile_name')
        if not logfile_name:
            logfile_name = self.DEFAULT_LOGFILENAME
        logfile = os.path.join(logfile_folder, logfile_name)

        if self.config.get('log', 'rotate'):
            logfile_size = self.config.get('log', 'logfile_size') << 10
            max_logfiles = self.config.get('log', 'max_logfiles')
            filehdlr = logging.handlers.RotatingFileHandler(
                logfile,
                maxBytes=logfile_size,
                backupCount=max_logfiles,
                encoding=locale.getpreferredencoding(do_setlocale=False))
        else:
            filehdlr = logging.FileHandler(
                logfile,
                encoding=locale.getpreferredencoding(do_setlocale=False))

        filehdlr.setFormatter(fileform)
        self.log.addHandler(filehdlr)

    # TODO: Extend `logging.Logger` like `..plugin.Log`
    def _init_logger(self):
        level = logging.DEBUG if self.debug else logging.INFO

        # Init logger
        self.log = logging.getLogger()
        self.log.setLevel(level)

        # Set console handler
        self._init_consolelogger()

        # Set syslog handler
        if self.config.get('log', 'syslog') != 'no':
            self._init_syslogger()

        # Set file handler
        if self.config.get('log', 'logfile'):
            self._init_filelogger()

    def _setup_permissions(self):
        if os.name == 'nt':
            return None

        change_group = self.config.get('permission', 'change_group')
        change_user = self.config.get('permission', 'change_user')

        if change_group:
            try:
                group = self.config.get('permission', 'group')
                set_process_group(group)
            except Exception as e:
                self.log.error(self._("Unable to change gid"), str(e))

        if change_user:
            try:
                user = self.config.get('permission', 'user')
                set_process_user(user)
            except Exception as e:
                self.log.error(self._("Unable to change uid"), str(e))

    def set_language(self, lang):
        localedir = resource_filename(__package__, 'locale')
        lc = locale.locale_alias[lang.lower()].split('_', 1)[0]
        trans = get_translation('core', localedir, (lc, ))
        try:
            self._ = trans.ugettext
        except AttributeError:
            self._ = trans.gettext

    def _setup_language(self):
        self.log.debug("Loading language ...")
        lang = self.config.get('general', 'language')
        default = self.DEFAULT_LANGUAGE
        if not lang:
            code = locale.getlocale()[0] or locale.getdefaultlocale()[0]
            lang = default if code is None else code.lower().split('_', 1)[0]
        try:
            self.set_language(lang)
        except Exception as e:
            if lang == default:
                raise
            self.log.warning(
                self._("Unable to load `{0}` language, "
                       "use default `{1}`").format(lang, default), str(e))
            self.set_language(default)

    def _setup_debug(self):
        if self.__debug is None:
            debug_log = self.config.get('log', 'debug')
            verbose_log = self.config.get('log', 'verbose')
            self.__debug = 0 if not debug_log else 2 if verbose_log else 1

    # def start_interface(self, webui=None, rpc=None):
    # if webui is None:
    # webui = self.__webui
    # if rpc is None:
    # rpc = self.__rpc

    # # TODO: Parse `remote`
    # if rpc or self.config.get('rpc', 'activated'):
    # self.log.debug("Activating RPC interface ...")
    # self.rem.start()
    # elif not webui:
    # webui = True

    # TODO: Parse remote host:port

    # if isinstance(webui, str):
    # host, port = map(str.strip, webui.rsplit(':', 1))
    # webui = True
    # else:
    # host, port = (None, None)
    # kwgs = {
    # 'server': self.config.get('webui', 'server'),
    # 'host': host or self.config.get('webui', 'host'),
    # 'port': port or self.config.get('webui', 'port'),
    # 'key': self.config.get('ssl', 'key'),
    # 'cert': self.config.get('ssl', 'cert'),
    # 'ssl': self.config.get('ssl', 'activated')
    # }
    # if webui or self.config.get('webui', 'activated'):
    # from .thread.webserver import WebServer
    # self.webserver = WebServer(self)
    # self.webserver.start()
    # self.svm.add('webui', **kwgs)
    # self.svm.start()

    def _init_api(self):
        from .api import Api
        self.api = Api(self)

    def _init_database(self):
        from .database import DatabaseBackend
        from .datatype import Permission, Role

        # TODO: Move inside DatabaseBackend
        newdb = not os.path.isfile(DatabaseBackend.DB_FILE)
        self.db = DatabaseBackend(self)
        self.db.setup()

        if self.__restore or newdb:
            self.db.add_user(self.DEFAULT_USERNAME, self.DEFAULT_PASSWORD,
                             Role.Admin, Permission.All)
        if self.__restore:
            self.log.warning(
                self._("Restored default login credentials `admin|pyload`"))

    def _init_managers(self):
        from .manager import (AccountManager, AddonManager, EventManager,
                              ExchangeManager, FileManager, InfoManager,
                              PluginManager, TransferManager)

        self.scheduler = sched.scheduler(time.time, time.sleep)
        self.filemanager = self.files = FileManager(self)
        self.pluginmanager = self.pgm = PluginManager(self)
        self.exchangemanager = self.exm = ExchangeManager(self)
        self.eventmanager = self.evm = EventManager(self)
        self.accountmanager = self.acm = AccountManager(self)
        self.infomanager = self.iom = InfoManager(self)
        self.transfermanager = self.tsm = TransferManager(self)
        self.addonmanager = self.adm = AddonManager(self)
        # self.remotemanager = self.rem = RemoteManager(self)
        # self.servermanager = self.svm = ServerManager(self)
        self.db.manager = self.files  # ugly?

    def _init_requests(self):
        self.request = self.req = RequestFactory(self)

    def _init_config(self):
        session = ConfigParser(self.__SESSIONFILENAME, session_defaults)

        flags = portalocker.LOCK_EX | portalocker.LOCK_NB
        portalocker.lock(session.fp, flags)

        profiledir = os.path.join(self.configdir, self.profile)
        psp = psutil.Process()
        session.set('current', 'id', time.time())
        session.set('current', 'profile', 'path', profiledir)
        session.set('current', 'profile', 'pid', psp.pid)
        session.set('current', 'profile', 'ctime', psp.create_time())

        self.config = ConfigParser(self.__CONFIGFILENAME, config_defaults)
        self.session = session

    def _init_cache(self):
        # Re-use cache
        tempdir = self.__tempdir
        if tempdir is None:
            tempdir = self.session.get('previous', 'cache', 'path')
            if tempdir is None or not os.path.isdir(tempdir):
                pydir = os.path.join(TMPDIR, __namespace__)
                makedirs(pydir, exist_ok=True)
                tempdir = tempfile.mkdtemp(dir=pydir)
        self.session.set('current', 'cache', 'path', tempdir)
        self.cachedir = tempdir
        # if tempdir not in sys.path:
        # sys.path.append(tempdir)

    def _register_signals(self):
        shutfn = lambda s, f: self.shutdown()
        quitfn = lambda s, f: self.terminate()
        try:
            if os.name == 'nt':
                # signal.signal(signal.CTRL_C_EVENT, shutfn)
                signal.signal(signal.CTRL_BREAK_EVENT, shutfn)
            else:
                signal.signal(signal.SIGTERM, shutfn)
                # signal.signal(signal.SIGINT, shutfn)
                signal.signal(signal.SIGQUIT, quitfn)
                # signal.signal(signal.SIGTSTP, lambda s, f: self.stop())
                # signal.signal(signal.SIGCONT, lambda s, f: self.run())
        except Exception:
            pass

    def __init__(self,
                 profiledir=None,
                 tempdir=None,
                 debug=None,
                 restore=None):
        self.__running = Event()
        self.__do_restart = False
        self.__do_shutdown = False
        self.__debug = debug if debug is None else int(debug)
        self.__restore = bool(restore)
        self.__tempdir = tempdir
        self._ = lambda x: x

        self._init_profile(profiledir)

        # if refresh:
        # cleanpy(PACKDIR)

        Process.__init__(self)

    @property
    def version(self):
        return __version__

    @property
    def version_info(self):
        return __version_info__

    @property
    def running(self):
        return self.__running.is_set()

    @property
    def debug(self):
        return self.__debug

    def _init_profile(self, profiledir):
        profiledir = fullpath(profiledir)
        os.chdir(profiledir)
        self.configdir, self.profile = os.path.split(profiledir)

    def _setup_process(self):
        try:
            set_process_name('pyLoad')
        except AttributeError:
            pass
        niceness = self.config.get('general', 'niceness')
        renice(niceness=niceness)
        ioniceness = int(self.config.get('general', 'ioniceness'))
        ionice(niceness=ioniceness)

    def _setup_storage(self):
        storage_folder = self.config.get('general', 'storage_folder')
        if not storage_folder:
            storage_folder = os.path.join(USERDIR, self.DEFAULT_STORAGEDIRNAME)
        self.log.debug("Storage: {0}".format(storage_folder))
        makedirs(storage_folder, exist_ok=True)
        avail_space = format.size(availspace(storage_folder))
        self.log.info(
            self._("Available storage space: {0}").format(avail_space))

    def _workloop(self):
        self.__running.set()
        self.tsm.pause = False  # NOTE: Recheck...
        while True:
            self.__running.wait()
            self.tsm.work()
            self.iom.work()
            self.exm.work()
            if self.__do_restart:
                raise Restart
            if self.__do_shutdown:
                raise Shutdown
            self.scheduler.run()

    def _start_plugins(self):
        # TODO: Move to accountmanager
        self.log.info(self._("Activating accounts ..."))
        self.acm.load_accounts()
        # self.scheduler.enter(0, 0, self.acm.load_accounts)
        self.adm.activate_addons()

    def _show_info(self):
        self.log.info(self._("Welcome to pyLoad v{0}").format(self.version))

        self.log.info(self._("Profile: {0}").format(self.profile))
        self.log.info(self._("Config directory: {0}").format(self.configdir))

        self.log.debug("Cache directory: {0}".format(self.cachedir))

    def run(self):
        self._init_config()
        self._init_cache()
        self._setup_debug()
        self._init_logger()
        try:
            self.log.debug("Running pyLoad ...")

            self._setup_language()
            self._setup_permissions()
            self._init_database()
            self._init_managers()
            self._init_requests()
            self._init_api()

            self._show_info()
            self._setup_storage()
            self._start_plugins()
            self._setup_process()

            self.log.info(self._("pyLoad is up and running"))
            self.evm.fire('pyload:started')

            # # some memory stats
            # from guppy import hpy
            # hp=hpy()
            # print(hp.heap())
            # import objgraph
            # objgraph.show_most_common_types(limit=30)
            # import memdebug
            # memdebug.start(8002)
            # from meliae import scanner
            # scanner.dump_all_objects(os.path.join(PACKDIR, 'objs.json'))

            self._workloop()

        except Restart:
            self.restart()
        except Shutdown:
            self.shutdown()
        except (KeyboardInterrupt, SystemExit):
            self.shutdown()
        except Exception as e:
            self.log.critical(str(e))
            self.terminate()
            raise
        else:
            self.shutdown()

    def _remove_loggers(self):
        for handler in self.log.handlers:
            with closing(handler) as hdlr:
                self.log.removeHandler(hdlr)

    def restart(self):
        self.stop()
        self.log.info(self._("Restarting pyLoad ..."))
        self.evm.fire('pyload:restarting')
        self.start()

    def _register_instance(self):
        profiledir = os.path.join(self.configdir, self.profile)
        if profiledir in _pmap:
            raise RuntimeError("A pyLoad instance using profile `{0}` "
                               "is already running".format(profiledir))
        _pmap[profiledir] = self

    def _unregister_instance(self):
        profiledir = os.path.join(self.configdir, self.profile)
        _pmap.pop(profiledir, None)

    def _close_session(self):
        id = self.session.get('previous', 'id')
        self.session[id] = self.session['previous']
        self.session['previous'] = self.session['current']
        self.session['current'].reset()
        self.session.close()

    def terminate(self):
        try:
            self.log.debug("Killing pyLoad ...")
            self._unregister_instance()
            self._close_session()
        finally:
            Process.terminate(self)

    def shutdown(self):
        try:
            self.stop()
            self.log.info(self._("Exiting pyLoad ..."))
            self.tsm.shutdown()
            self.db.shutdown()  # NOTE: Why here?
            self.config.close()
            self._remove_loggers()
            # if cleanup:
            # self.log.info(self._("Deleting temp files ..."))
            # remove(self.tempdir, ignore_errors=True)
        finally:
            self.terminate()

    def start(self):
        if not self.is_alive():
            self._register_instance()
            self._register_signals()
            Process.start(self)
        elif not self.running:
            self.log.info(self._("Starting pyLoad ..."))
            self.evm.fire('pyload:starting')
            self.__running.set()

    def stop(self):
        if not self.running:
            return None
        try:
            self.log.info(self._("Stopping pyLoad ..."))
            self.evm.fire('pyload:stopping')
            self.adm.deactivate_addons()
            self.api.stop_all_downloads()
        finally:
            self.files.sync_save()
            self.__running.clear()
            self.evm.fire('pyload:stopped')
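Core above pauses and resumes its scheduler loop with a single Event (__running): wait() parks the loop whenever stop() clears the flag, and start() resumes it by setting the flag again. A stripped-down sketch of that gating pattern; the work() callable and sleep interval are placeholders:

from multiprocessing import Event
import time

running = Event()

def workloop(work):
    running.set()
    while True:
        running.wait()      # parks here whenever the flag is cleared (stop())
        work()
        time.sleep(1)

# running.clear()  -> pauses the loop after the current iteration
# running.set()    -> resumes it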
Exemplo n.º 55
0
class NefitCore(object):
    _accesskey_prefix = 'Ct7ZR03b_'
    _rrc_contact_prefix = 'rrccontact_'
    _rrc_gateway_prefix = 'rrcgateway_'
    _magic = bytearray.fromhex("58f18d70f667c9c79ef7de435bf0f9b1553bbb6e61816212ab80e5b0d351fbb1")

    serial_number = None
    access_key = None
    password = None

    jid = None
    _from = None
    _to = None
    client = None
    encryption = None

    event = None
    container = {}

    def __init__(self, serial_number, access_key, password, host="wa2-mz36-qrmzh6.bosch.de", sasl_mech="DIGEST-MD5"):
        """

        :param serial_number:
        :param access_key:
        :param password:
        :param host:
        :param sasl_mech:
        """
        serial_number = str(serial_number)
        self.serial_number = serial_number
        self.access_key = access_key
        self.password = password

        self.encryption = AESCipher(self._magic, access_key, password)

        identifier = serial_number + "@" + host
        self.jid = jid = self._from = self._rrc_contact_prefix + identifier
        self._to = self._rrc_gateway_prefix + identifier

        self.client = ClientXMPP(jid=jid, password=self._accesskey_prefix + access_key, sasl_mech=sasl_mech)
        self.client.add_event_handler("session_start", self.session_start)
        self.client.register_plugin('xep_0199')

    @staticmethod
    def set_verbose():
        import logging
        logging.basicConfig(filename="debug.log", level=logging.DEBUG)

    def message(self, msg):
        if msg['type'] in ('chat', 'normal'):
            headers = msg['body'].split("\n")[:-1]
            body = msg['body'].split("\n")[-1:][0]
            if 'HTTP/1.0 400 Bad Request' in headers:
                return
            response = self.decrypt(body)
            if 'Content-Type: application/json' in headers:
                _LOGGER.debug("response='{}'".format(response))
                response = response.strip()
                if len(response) > 1:
                    response = json.loads(response.strip())
            self.container[id(self.event)] = response
        self.event.set()

    def connect(self, block=False):
        self.client.connect()
        self.client.process(block=block)

    def session_start(self, event):
        self.client.send_presence()
        self.client.get_roster()

    def disconnect(self):
        self.client.disconnect()

    def get(self, uri, timeout=10):
        self.event = Event()
        self.client.add_event_handler("message", self.message)
        self.send("GET %s HTTP/1.1\rUser-Agent: NefitEasy\r\r" % uri)
        self.event.wait(timeout=timeout)
        self.client.del_event_handler("message", self.message)
        return self.container.get(id(self.event))

    def put(self, uri, data, timeout=10):
        data = data if isinstance(data, str) else json.dumps(data, separators=(',', ':'))
        encrypted_data = self.encrypt(data).decode("utf8")
        body = "\r".join([
            'PUT %s HTTP/1.1' % uri,
            'Content-Type: application/json',
            'Content-Length: %i' % len(encrypted_data),
            'User-Agent: NefitEasy\r',
            encrypted_data
        ])
        self.event = Event()
        self.client.add_event_handler("message", self.message)
        self.send(body)
        self.event.wait(timeout=timeout)
        self.client.del_event_handler("message", self.message)

    def send(self, body):
        # this horrible piece of code breaks xml syntax but does actually work...
        body = body.replace("\r", "&#13;\n")
        message = self.client.make_message(mto=self._to, mfrom=self._from, mbody=body)
        message['lang'] = None
        str_data = tostring(message.xml, xmlns=message.stream.default_ns,
                            stream=message.stream,
                            top_level=True)
        str_data = str_data.replace("&amp;#13;", "&#13;")
        return message.stream.send_raw(str_data)

    def encrypt(self, data):
        return self.encryption.encrypt(data)

    def decrypt(self, data):
        return self.encryption.decrypt(data)

    def get_display_code(self):
        return self.get('/system/appliance/displaycode')

    def get_status(self):
        return self.get('/ecus/rrc/uiStatus')

    def get_location(self):
        return self.get('/system/location/latitude'), self.get('/system/location/longitude')

    def get_outdoor(self):
        return self.get('/system/sensors/temperatures/outdoor_t1')

    def get_pressure(self):
        return self.get('/system/appliance/systemPressure')

    def get_program(self):
        return (
            self.get('/ecus/rrc/userprogram/activeprogram'),
            self.get('/ecus/rrc/userprogram/program1'),
            self.get('/ecus/rrc/userprogram/program2'),
        )

    def get_year_total(self):
        return self.get('/ecus/rrc/recordings/yearTotal')

    def set_temperature(self, temperature):
        self.put('/heatingCircuits/hc1/temperatureRoomManual', {'value': float(temperature)})
        self.put('/heatingCircuits/hc1/manualTempOverride/status', {'value': 'on'})
        self.put('/heatingCircuits/hc1/manualTempOverride/temperature', {'value': float(temperature)})

    def get_actualSupplyTemperature(self):
        return self.get('/heatingCircuits/hc1/actualSupplyTemperature')
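A hedged usage sketch of the class above; the serial number, access key and password are placeholders, and error handling is omitted.

# Illustrative only -- the credentials below are placeholders, not real values.
nefit = NefitCore(serial_number="123456789",
                  access_key="abcdefgh",
                  password="secret")
nefit.connect()                      # starts the XMPP session in the background
try:
    status = nefit.get_status()      # GET /ecus/rrc/uiStatus (None on timeout)
    print(status)
    nefit.set_temperature(20.5)      # three PUTs against /heatingCircuits/hc1/...
finally:
    nefit.disconnect()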
Exemplo n.º 56
0
class Speedtest(object):
    def __init__(self):
        from multiprocessing import Event
        self.event = Event()
        self.has_stopped = False

    def speedtest_thread(self):
        if self.event.wait(600):
            return

        logging.info("Speedtest starting...You can't stop right now!")
        CTid = 0
        speedtest_ct = speedtest.Speedtest()
        speedtest_ct.get_servers()
        servers_list = []
        for _, servers in sorted(speedtest_ct.servers.items()):
            for server in servers:
                if server['country'].find('China') != -1 and server[
                        'sponsor'].find('Telecom') != -1:
                    servers_list.append(server)
        speedtest_ct.get_best_server(servers_list)
        results_ct = speedtest_ct.results
        CTPing = str(results_ct.server['latency']) + ' ms'
        speedtest_ct.download()
        CTDLSpeed = str(round(
            (results_ct.download / 1000 / 1000), 2)) + " Mbit/s"
        speedtest_ct.upload()
        CTUpSpeed = str(round(
            (results_ct.upload / 1000 / 1000), 2)) + " Mbit/s"

        CUid = 0
        speedtest_cu = speedtest.Speedtest()
        speedtest_cu.get_servers()
        servers_list = []
        for _, servers in sorted(speedtest_cu.servers.items()):
            for server in servers:
                if server['country'].find('China') != -1 and server[
                        'sponsor'].find('Unicom') != -1:
                    servers_list.append(server)
        speedtest_cu.get_best_server(servers_list)
        results_cu = speedtest_cu.results
        CUPing = str(results_cu.server['latency']) + ' ms'
        speedtest_cu.download()
        CUDLSpeed = str(round(
            (results_cu.download / 1000 / 1000), 2)) + " Mbit/s"
        speedtest_cu.upload()
        CUUpSpeed = str(round(
            (results_cu.upload / 1000 / 1000), 2)) + " Mbit/s"

        CMid = 0
        speedtest_cm = speedtest.Speedtest()
        speedtest_cm.get_servers()
        servers_list = []
        for _, servers in sorted(speedtest_cm.servers.items()):
            for server in servers:
                if server['country'].find('China') != -1 and server[
                        'sponsor'].find('Mobile') != -1:
                    servers_list.append(server)
        speedtest_cm.get_best_server(servers_list)
        results_cm = speedtest_cm.results
        CMPing = str(results_cm.server['latency']) + ' ms'
        speedtest_cm.download()
        CMDLSpeed = str(round(
            (results_cm.download / 1000 / 1000), 2)) + " Mbit/s"
        speedtest_cm.upload()
        CMUpSpeed = str(round(
            (results_cm.upload / 1000 / 1000), 2)) + " Mbit/s"

        if configloader.get_config().API_INTERFACE == 'modwebapi':
            webapi.postApi('func/speedtest',
                           {'node_id': configloader.get_config().NODE_ID}, {
                               'data': [{
                                   'telecomping': CTPing,
                                   'telecomeupload': CTUpSpeed,
                                   'telecomedownload': CTDLSpeed,
                                   'unicomping': CUPing,
                                   'unicomupload': CUUpSpeed,
                                   'unicomdownload': CUDLSpeed,
                                   'cmccping': CMPing,
                                   'cmccupload': CMUpSpeed,
                                   'cmccdownload': CMDLSpeed
                               }]
                           })
        else:
            import cymysql
            if configloader.get_config().MYSQL_SSL_ENABLE == 1:
                conn = cymysql.connect(
                    host=configloader.get_config().MYSQL_HOST,
                    port=configloader.get_config().MYSQL_PORT,
                    user=configloader.get_config().MYSQL_USER,
                    passwd=configloader.get_config().MYSQL_PASS,
                    db=configloader.get_config().MYSQL_DB,
                    charset='utf8',
                    ssl={
                        'ca': configloader.get_config().MYSQL_SSL_CA,
                        'cert': configloader.get_config().MYSQL_SSL_CERT,
                        'key': configloader.get_config().MYSQL_SSL_KEY
                    })
            else:
                conn = cymysql.connect(
                    host=configloader.get_config().MYSQL_HOST,
                    port=configloader.get_config().MYSQL_PORT,
                    user=configloader.get_config().MYSQL_USER,
                    passwd=configloader.get_config().MYSQL_PASS,
                    db=configloader.get_config().MYSQL_DB,
                    charset='utf8')
            conn.autocommit(True)
            cur = conn.cursor()
            cur.execute(
                "INSERT INTO `speedtest` (`id`, `nodeid`, `datetime`, `telecomping`, `telecomeupload`, `telecomedownload`, `unicomping`, `unicomupload`, `unicomdownload`, `cmccping`, `cmccupload`, `cmccdownload`) VALUES (NULL, '"
                + str(configloader.get_config().NODE_ID) +
                "', unix_timestamp(), '" + CTPing + "', '" + CTUpSpeed +
                "', '" + CTDLSpeed + "', '" + CUPing + "', '" + CUUpSpeed +
                "', '" + CUDLSpeed + "', '" + CMPing + "', '" + CMUpSpeed +
                "', '" + CMDLSpeed + "')")
            cur.close()
            conn.close()

        logging.info("Speedtest finished")

    @staticmethod
    def thread_db(obj):

        if configloader.get_config().SPEEDTEST == 0:
            return

        if configloader.get_config().API_INTERFACE == 'modwebapi':
            import webapi_utils

            global webapi
            webapi = webapi_utils.WebApi()

        global db_instance
        db_instance = obj()

        try:
            while True:
                try:
                    db_instance.speedtest_thread()
                except Exception as e:
                    import traceback
                    trace = traceback.format_exc()
                    logging.error(trace)
                    #logging.warn('db thread except:%s' % e)
                if db_instance.event.wait(configloader.get_config().SPEEDTEST *
                                          3600):
                    break
                if db_instance.has_stopped:
                    break
        except KeyboardInterrupt as e:
            pass
        db_instance = None

    @staticmethod
    def thread_db_stop():
        global db_instance
        db_instance.has_stopped = True
        db_instance.event.set()
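The thread_db/thread_db_stop pair above is intended to be driven from outside the class. A minimal sketch of how the loop might be started and stopped, assuming speedtests are enabled in the loaded configuration:

import threading

# Illustrative only: run the periodic speedtest loop in a background thread.
# thread_db() expects a callable that builds the instance, so the class itself works.
speedtest_runner = threading.Thread(target=Speedtest.thread_db, args=(Speedtest,))
speedtest_runner.start()

# ... later, on shutdown, wake the sleeping loop and let it exit.
Speedtest.thread_db_stop()
speedtest_runner.join()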
Exemplo n.º 57
0
class Artifacts(object):
    _flush_frequency_sec = 300.
    # notice these two should match
    _save_format = '.csv.gz'
    _compression = 'gzip'
    # hashing constants
    _hash_block_size = 65536
    _pd_artifact_type = 'data-audit-table'

    class _ProxyDictWrite(dict):
        """ Dictionary wrapper that updates an arguments instance on any item set in the dictionary """
        def __init__(self, artifacts_manager, *args, **kwargs):
            super(Artifacts._ProxyDictWrite, self).__init__(*args, **kwargs)
            self._artifacts_manager = artifacts_manager
            # per-artifact metadata to store alongside each artifact (keyed by name)
            self.artifact_metadata = {}
            # list of hash columns to calculate uniqueness for the artifacts
            self.artifact_hash_columns = {}

        def __setitem__(self, key, value):
            # check that value is of type pandas
            if pd and isinstance(value, pd.DataFrame):
                super(Artifacts._ProxyDictWrite, self).__setitem__(key, value)

                if self._artifacts_manager:
                    self._artifacts_manager.flush()
            else:
                raise ValueError(
                    'Artifacts currently support pandas.DataFrame objects only'
                )

        def unregister_artifact(self, name):
            self.artifact_metadata.pop(name, None)
            self.pop(name, None)

        def add_metadata(self, name, metadata):
            self.artifact_metadata[name] = deepcopy(metadata)

        def get_metadata(self, name):
            return self.artifact_metadata.get(name)

        def add_hash_columns(self, artifact_name, hash_columns):
            self.artifact_hash_columns[artifact_name] = hash_columns

        def get_hash_columns(self, artifact_name):
            return self.artifact_hash_columns.get(artifact_name)

    @property
    def registered_artifacts(self):
        return self._artifacts_container

    @property
    def summary(self):
        return self._summary

    def __init__(self, task):
        self._task = task
        # notice the double link, this is important since the Artifact
        # dictionary needs to signal the Artifacts manager on changes
        self._artifacts_container = self._ProxyDictWrite(self)
        self._last_artifacts_upload = {}
        self._unregister_request = set()
        self._thread = None
        self._flush_event = Event()
        self._exit_flag = False
        self._summary = ''
        self._temp_folder = []
        self._task_artifact_list = []
        self._task_edit_lock = RLock()
        self._storage_prefix = None

    def register_artifact(self,
                          name,
                          artifact,
                          metadata=None,
                          uniqueness_columns=True):
        """
        :param str name: name of the artifact. Note: it will override a previous artifact if the name already exists.
        :param pandas.DataFrame artifact: artifact object; currently only pandas.DataFrame is supported
        :param dict metadata: dictionary of key/value pairs to store with the artifact (visible in the UI)
        :param list uniqueness_columns: list of columns used as the artifact uniqueness comparison criteria. The
            default value is True, which means all the columns (same as artifact.columns).
        """
        # currently we support pandas.DataFrame (which we will upload as csv.gz)
        if name in self._artifacts_container:
            LoggerRoot.get_base_logger().info(
                'Register artifact, overwriting existing artifact \"{}\"'.
                format(name))
        self._artifacts_container.add_hash_columns(
            name,
            list(artifact.columns
                 if uniqueness_columns is True else uniqueness_columns))
        self._artifacts_container[name] = artifact
        if metadata:
            self._artifacts_container.add_metadata(name, metadata)

    def unregister_artifact(self, name):
        # Remove artifact from the watch list
        self._unregister_request.add(name)
        self.flush()

    def upload_artifact(self,
                        name,
                        artifact_object=None,
                        metadata=None,
                        delete_after_upload=False):
        if not Session.check_min_api_version('2.3'):
            LoggerRoot.get_base_logger().warning(
                'Artifacts not supported by your TRAINS-server version, '
                'please upgrade to the latest server version')
            return False

        if name in self._artifacts_container:
            raise ValueError(
                "Artifact by the name of {} is already registered, use register_artifact"
                .format(name))

        artifact_type_data = tasks.ArtifactTypeData()
        override_filename_in_uri = None
        override_filename_ext_in_uri = None
        uri = None
        if np and isinstance(artifact_object, np.ndarray):
            artifact_type = 'numpy'
            artifact_type_data.content_type = 'application/numpy'
            artifact_type_data.preview = str(artifact_object.__repr__())
            override_filename_ext_in_uri = '.npz'
            override_filename_in_uri = name + override_filename_ext_in_uri
            fd, local_filename = mkstemp(prefix=quote(name, safe="") + '.',
                                         suffix=override_filename_ext_in_uri)
            os.close(fd)
            np.savez_compressed(local_filename, **{name: artifact_object})
            delete_after_upload = True
        elif pd and isinstance(artifact_object, pd.DataFrame):
            artifact_type = 'pandas'
            artifact_type_data.content_type = 'text/csv'
            artifact_type_data.preview = str(artifact_object.__repr__())
            override_filename_ext_in_uri = self._save_format
            override_filename_in_uri = name
            fd, local_filename = mkstemp(prefix=quote(name, safe="") + '.',
                                         suffix=override_filename_ext_in_uri)
            os.close(fd)
            artifact_object.to_csv(local_filename,
                                   compression=self._compression)
            delete_after_upload = True
        elif isinstance(artifact_object, Image.Image):
            artifact_type = 'image'
            artifact_type_data.content_type = 'image/png'
            desc = str(artifact_object.__repr__())
            artifact_type_data.preview = desc[1:desc.find(' at ')]
            override_filename_ext_in_uri = '.png'
            override_filename_in_uri = name + override_filename_ext_in_uri
            fd, local_filename = mkstemp(prefix=quote(name, safe="") + '.',
                                         suffix=override_filename_ext_in_uri)
            os.close(fd)
            artifact_object.save(local_filename)
            delete_after_upload = True
        elif isinstance(artifact_object, dict):
            artifact_type = 'JSON'
            artifact_type_data.content_type = 'application/json'
            preview = json.dumps(artifact_object, sort_keys=True, indent=4)
            override_filename_ext_in_uri = '.json'
            override_filename_in_uri = name + override_filename_ext_in_uri
            fd, local_filename = mkstemp(prefix=quote(name, safe="") + '.',
                                         suffix=override_filename_ext_in_uri)
            os.write(fd, bytes(preview.encode()))
            os.close(fd)
            artifact_type_data.preview = preview
            delete_after_upload = True
        elif isinstance(artifact_object, six.string_types) and urlparse(
                artifact_object).scheme in remote_driver_schemes:
            # we should not upload this, just register
            local_filename = None
            uri = artifact_object
            artifact_type = 'custom'
            artifact_type_data.content_type = mimetypes.guess_type(
                artifact_object)[0]
        elif isinstance(artifact_object, six.string_types + (Path, )):
            # check if single file
            artifact_object = Path(artifact_object)

            artifact_object = artifact_object.expanduser().absolute()
            try:
                create_zip_file = not artifact_object.is_file()
            except Exception:  # Hack for windows pathlib2 bug, is_file isn't valid.
                create_zip_file = True
            else:  # We assume that this is not Windows os
                if artifact_object.is_dir():
                    # change to wildcard
                    artifact_object /= '*'

            if create_zip_file:
                folder = Path('').joinpath(*artifact_object.parts[:-1])
                if not folder.is_dir() or not folder.parts:
                    raise ValueError(
                        "Artifact file/folder '{}' could not be found".format(
                            artifact_object.as_posix()))

                wildcard = artifact_object.parts[-1]
                files = list(Path(folder).rglob(wildcard))
                override_filename_ext_in_uri = '.zip'
                override_filename_in_uri = folder.parts[
                    -1] + override_filename_ext_in_uri
                fd, zip_file = mkstemp(
                    prefix=quote(folder.parts[-1], safe="") + '.',
                    suffix=override_filename_ext_in_uri)
                try:
                    artifact_type_data.content_type = 'application/zip'
                    artifact_type_data.preview = 'Archive content {}:\n'.format(
                        artifact_object.as_posix())

                    with ZipFile(zip_file,
                                 'w',
                                 allowZip64=True,
                                 compression=ZIP_DEFLATED) as zf:
                        for filename in sorted(files):
                            if filename.is_file():
                                relative_file_name = filename.relative_to(
                                    folder).as_posix()
                                artifact_type_data.preview += '{} - {}\n'.format(
                                    relative_file_name,
                                    humanfriendly.format_size(
                                        filename.stat().st_size))
                                zf.write(filename.as_posix(),
                                         arcname=relative_file_name)
                except Exception as e:
                    # failed uploading folder:
                    LoggerRoot.get_base_logger().warning(
                        'Exception {}\nFailed zipping artifact folder {}'.
                        format(e, folder))
                    return None
                finally:
                    os.close(fd)

                artifact_object = zip_file
                artifact_type = 'archive'
                artifact_type_data.content_type = mimetypes.guess_type(
                    artifact_object)[0]
                local_filename = artifact_object
                delete_after_upload = True
            else:
                if not artifact_object.is_file():
                    raise ValueError(
                        "Artifact file '{}' could not be found".format(
                            artifact_object.as_posix()))

                override_filename_in_uri = artifact_object.parts[-1]
                artifact_object = artifact_object.as_posix()
                artifact_type = 'custom'
                artifact_type_data.content_type = mimetypes.guess_type(
                    artifact_object)[0]
                local_filename = artifact_object
        else:
            raise ValueError("Artifact type {} not supported".format(
                type(artifact_object)))

        # remove from existing list, if exists
        for artifact in self._task_artifact_list:
            if artifact.key == name:
                if artifact.type == self._pd_artifact_type:
                    raise ValueError(
                        "Artifact of name {} already registered, "
                        "use register_artifact instead".format(name))

                self._task_artifact_list.remove(artifact)
                break

        if not local_filename:
            file_size = None
            file_hash = None
        else:
            # check that the file to upload exists
            local_filename = Path(local_filename).absolute()
            if not local_filename.exists() or not local_filename.is_file():
                LoggerRoot.get_base_logger().warning(
                    'Artifact upload failed, cannot find file {}'.format(
                        local_filename.as_posix()))
                return False

            file_hash, _ = self.sha256sum(local_filename.as_posix())
            file_size = local_filename.stat().st_size

            uri = self._upload_local_file(
                local_filename,
                name,
                delete_after_upload=delete_after_upload,
                override_filename=override_filename_in_uri,
                override_filename_ext=override_filename_ext_in_uri)

        timestamp = int(time())

        artifact = tasks.Artifact(
            key=name,
            type=artifact_type,
            uri=uri,
            content_size=file_size,
            hash=file_hash,
            timestamp=timestamp,
            type_data=artifact_type_data,
            display_data=[(str(k), str(v))
                          for k, v in metadata.items()] if metadata else None)

        # update task artifacts
        with self._task_edit_lock:
            self._task_artifact_list.append(artifact)
            self._task.set_artifacts(self._task_artifact_list)

        return True

    def flush(self):
        # start the thread if it hasn't already:
        self._start()
        # flush the current state of all artifacts
        self._flush_event.set()

    def stop(self, wait=True):
        # stop the daemon thread and quit
        # wait until thread exists
        self._exit_flag = True
        self._flush_event.set()
        if wait:
            if self._thread:
                self._thread.join()
            # remove all temp folders
            for f in self._temp_folder:
                try:
                    Path(f).rmdir()
                except Exception:
                    pass

    def _start(self):
        """ Start daemon thread if any artifacts are registered and thread is not up yet """
        if not self._thread and self._artifacts_container:
            # start the daemon thread
            self._flush_event.clear()
            self._thread = Thread(target=self._daemon)
            self._thread.daemon = True
            self._thread.start()

    def _daemon(self):
        while not self._exit_flag:
            self._flush_event.wait(self._flush_frequency_sec)
            self._flush_event.clear()
            artifact_keys = list(self._artifacts_container.keys())
            for name in artifact_keys:
                try:
                    self._upload_data_audit_artifacts(name)
                except Exception as e:
                    LoggerRoot.get_base_logger().warning(str(e))

        # create summary
        self._summary = self._get_statistics()

    def _upload_data_audit_artifacts(self, name):
        logger = self._task.get_logger()
        pd_artifact = self._artifacts_container.get(name)
        pd_metadata = self._artifacts_container.get_metadata(name)

        # remove from artifacts watch list
        if name in self._unregister_request:
            try:
                self._unregister_request.remove(name)
            except KeyError:
                pass
            self._artifacts_container.unregister_artifact(name)

        if pd_artifact is None:
            return

        override_filename_ext_in_uri = self._save_format
        override_filename_in_uri = name
        fd, local_csv = mkstemp(prefix=quote(name, safe="") + '.',
                                suffix=override_filename_ext_in_uri)
        os.close(fd)
        local_csv = Path(local_csv)
        pd_artifact.to_csv(local_csv.as_posix(),
                           index=False,
                           compression=self._compression)
        current_sha2, file_sha2 = self.sha256sum(local_csv.as_posix(),
                                                 skip_header=32)
        if name in self._last_artifacts_upload:
            previous_sha2 = self._last_artifacts_upload[name]
            if previous_sha2 == current_sha2:
                # nothing to do, we can skip the upload
                try:
                    local_csv.unlink()
                except Exception:
                    pass
                return
        self._last_artifacts_upload[name] = current_sha2

        # If old trains-server, upload as debug image
        if not Session.check_min_api_version('2.3'):
            logger.report_image(title='artifacts',
                                series=name,
                                local_path=local_csv.as_posix(),
                                delete_after_upload=True,
                                iteration=self._task.get_last_iteration(),
                                max_image_history=2)
            return

        # Find our artifact
        artifact = None
        for an_artifact in self._task_artifact_list:
            if an_artifact.key == name:
                artifact = an_artifact
                break

        file_size = local_csv.stat().st_size

        # upload file
        uri = self._upload_local_file(
            local_csv,
            name,
            delete_after_upload=True,
            override_filename=override_filename_in_uri,
            override_filename_ext=override_filename_ext_in_uri)

        # update task artifacts
        with self._task_edit_lock:
            if not artifact:
                artifact = tasks.Artifact(key=name,
                                          type=self._pd_artifact_type)
                self._task_artifact_list.append(artifact)
            artifact_type_data = tasks.ArtifactTypeData()

            artifact_type_data.data_hash = current_sha2
            artifact_type_data.content_type = "text/csv"
            artifact_type_data.preview = str(
                pd_artifact.__repr__()) + '\n\n' + self._get_statistics(
                    {name: pd_artifact})

            artifact.type_data = artifact_type_data
            artifact.uri = uri
            artifact.content_size = file_size
            artifact.hash = file_sha2
            artifact.timestamp = int(time())
            artifact.display_data = [
                (str(k), str(v)) for k, v in pd_metadata.items()
            ] if pd_metadata else None

            self._task.set_artifacts(self._task_artifact_list)

    def _upload_local_file(self,
                           local_file,
                           name,
                           delete_after_upload=False,
                           override_filename=None,
                           override_filename_ext=None):
        """
        Upload local file and return uri of the uploaded file (uploading in the background)
        """
        upload_uri = self._task.output_uri or self._task.get_logger(
        ).get_default_upload_destination()
        if not isinstance(local_file, Path):
            local_file = Path(local_file)
        ev = UploadEvent(
            metric='artifacts',
            variant=name,
            image_data=None,
            upload_uri=upload_uri,
            local_image_path=local_file.as_posix(),
            delete_after_upload=delete_after_upload,
            override_filename=override_filename,
            override_filename_ext=override_filename_ext,
            override_storage_key_prefix=self._get_storage_uri_prefix())
        _, uri = ev.get_target_full_upload_uri(upload_uri)

        # send for upload
        self._task.reporter._report(ev)

        return uri

    def _get_statistics(self, artifacts_dict=None):
        summary = ''
        artifacts_dict = artifacts_dict or self._artifacts_container
        thread_pool = ThreadPool()

        try:
            # build hash row sets
            artifacts_summary = []
            for a_name, a_df in artifacts_dict.items():
                hash_cols = self._artifacts_container.get_hash_columns(a_name)
                if not pd or not isinstance(a_df, pd.DataFrame):
                    continue

                if hash_cols is True:
                    hash_col_drop = []
                else:
                    hash_cols = set(hash_cols)
                    missing_cols = hash_cols.difference(a_df.columns)
                    if missing_cols == hash_cols:
                        LoggerRoot.get_base_logger().warning(
                            'Uniqueness columns {} not found in artifact {}. '
                            'Skipping uniqueness check for artifact.'.format(
                                list(missing_cols), a_name))
                        continue
                    elif missing_cols:
                        # missing_cols must be a subset of hash_cols
                        hash_cols.difference_update(missing_cols)
                        LoggerRoot.get_base_logger().warning(
                            'Uniqueness columns {} not found in artifact {}. Using {}.'
                            .format(list(missing_cols), a_name,
                                    list(hash_cols)))

                    hash_col_drop = [
                        col for col in a_df.columns if col not in hash_cols
                    ]

                a_unique_hash = set()

                def hash_row(r):
                    a_unique_hash.add(hash(bytes(r)))

                a_shape = a_df.shape
                # parallelize
                a_hash_cols = a_df.drop(columns=hash_col_drop)
                thread_pool.map(hash_row, a_hash_cols.values)
                # add result
                artifacts_summary.append((
                    a_name,
                    a_shape,
                    a_unique_hash,
                ))

            # build intersection summary
            for i, (name, shape, unique_hash) in enumerate(artifacts_summary):
                summary += '[{name}]: shape={shape}, {unique} unique rows, {percentage:.1f}% uniqueness\n'.format(
                    name=name,
                    shape=shape,
                    unique=len(unique_hash),
                    percentage=100 * len(unique_hash) / float(shape[0]))
                for name2, shape2, unique_hash2 in artifacts_summary[i + 1:]:
                    intersection = len(unique_hash & unique_hash2)
                    summary += '\tIntersection with [{name2}] {intersection} rows: {percentage:.1f}%\n'.format(
                        name2=name2,
                        intersection=intersection,
                        percentage=100 * intersection /
                        float(len(unique_hash2)))
        except Exception as e:
            LoggerRoot.get_base_logger().warning(str(e))
        finally:
            thread_pool.close()
            thread_pool.terminate()
        return summary

    def _get_temp_folder(self, force_new=False):
        if force_new or not self._temp_folder:
            new_temp = mkdtemp(prefix='artifacts_')
            self._temp_folder.append(new_temp)
            return new_temp
        return self._temp_folder[0]

    def _get_storage_uri_prefix(self):
        if not self._storage_prefix:
            self._storage_prefix = self._task._get_output_destination_suffix()
        return self._storage_prefix

    @staticmethod
    def sha256sum(filename, skip_header=0):
        # Compute two sha256 digests: `h` skips the first `skip_header` bytes of the file
        # (sometimes the header is the only change), `file_hash` covers the whole file.
        h = hashlib.sha256()
        file_hash = hashlib.sha256()
        b = bytearray(Artifacts._hash_block_size)
        mv = memoryview(b)
        try:
            with open(filename, 'rb', buffering=0) as f:
                # skip header
                if skip_header:
                    file_hash.update(f.read(skip_header))
                for n in iter(lambda: f.readinto(mv), 0):
                    h.update(mv[:n])
                    if skip_header:
                        file_hash.update(mv[:n])
        except Exception as e:
            LoggerRoot.get_base_logger().warning(str(e))
            return None, None

        return h.hexdigest(), file_hash.hexdigest() if skip_header else None
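A hedged usage sketch of the manager above; `task` stands in for an already-initialised Task object, and the DataFrame contents are placeholders.

import pandas as pd

# Illustrative only: `task` is assumed to be an already-initialised Task object.
artifacts = Artifacts(task)

# register_artifact(): the DataFrame is watched and periodically re-uploaded on changes
df = pd.DataFrame({'id': [1, 2, 3], 'value': [0.1, 0.2, 0.3]})
artifacts.register_artifact('training_data', df,
                            metadata={'source': 'demo'},
                            uniqueness_columns=['id'])

# upload_artifact(): a one-off upload, here a plain dict stored as JSON
artifacts.upload_artifact('run_config', artifact_object={'lr': 0.01, 'epochs': 10})

# flush pending uploads and stop the background thread when done
artifacts.flush()
artifacts.stop(wait=True)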
Exemplo n.º 58
0
class EventsDevice(Process):
    def __init__(self, log_dir):
        super(EventsDevice, self).__init__()

        self._ready = Event()
        self._pub_port = Value('d', 0)
        self._sub_port = Value('d', 0)

        self.event_log_base_dir = os.path.join(log_dir, 'events_log')
        makedirs(self.event_log_base_dir)
        self.raw_events_filename = os.path.join(self.event_log_base_dir,
                                                'raw_events.log')

    def run(self):
        # pylint: disable=no-member; disable `no-member' messages because of zmq.

        context = frontend = backend = None

        try:
            context = zmq.Context(1)

            # Socket facing clients.
            frontend = context.socket(zmq.SUB)
            self._pub_port.value = frontend.bind_to_random_port("tcp://*")
            frontend.setsockopt(zmq.SUBSCRIBE, b"")
            frontend.setsockopt(zmq.LINGER, 0)

            # Socket facing services.
            backend = context.socket(zmq.PUB)
            self._sub_port.value = backend.bind_to_random_port("tcp://*")
            backend.setsockopt(zmq.LINGER, 0)

            self._ready.set()

            LOGGER.info("EventsDevice listen on pub_port=%d, sub_port=%d",
                        self._pub_port.value, self._sub_port.value)
            zmq.proxy(frontend, backend)
        except Exception:  # pylint: disable=broad-except
            LOGGER.exception("zmq device failed")
        except (KeyboardInterrupt, SystemExit) as ex:
            LOGGER.debug("EventsDevice was halted by %s",
                         ex.__class__.__name__)
        finally:
            if frontend:
                frontend.close()
            if backend:
                backend.close()
            if context:
                context.term()

    @staticmethod
    @timeout(timeout=120)
    def wait_till_event_loop_is_working(number_of_events):
        """
        It waits 120 seconds till row of {number_of_events} events is delivered with no loss
        """
        for _ in range(number_of_events):
            try:
                StartupTestEvent().publish(guaranteed=True)
            except TimeoutError:
                raise RuntimeError("Event loop is not working properly")

    @property
    def sub_port(self):
        if self._ready.wait(timeout=EVENTS_DEVICE_START_TIMEOUT):
            return self._sub_port
        raise RuntimeError("EventsDevice is not ready to send events.")

    @property
    def pub_port(self):
        if self._ready.wait(timeout=EVENTS_DEVICE_START_TIMEOUT):
            return self._pub_port
        raise RuntimeError("EventsDevice is not ready to receive events.")

    def get_client_socket(self, filter_type=b''):
        context = zmq.Context()
        socket = context.socket(zmq.SUB)  # pylint: disable=no-member
        socket.connect("tcp://localhost:%d" % self.sub_port.value)
        socket.setsockopt(zmq.SUBSCRIBE, filter_type)  # pylint: disable=no-member
        return socket

    def subscribe_events(self, filter_type=b'', stop_event=None):
        # pylint: disable=too-many-nested-blocks,too-many-branches
        LOGGER.info("subscribe to server with port %d", self.sub_port.value)
        socket = self.get_client_socket(filter_type)
        filters = dict()
        try:
            while stop_event is None or not stop_event.is_set():
                if socket.poll(timeout=1):
                    obj = socket.recv_pyobj()

                    # remove filter objects once the log event timestamp on the
                    # specific node is later than the time the filter was cancelled
                    if isinstance(obj, DatabaseLogEvent):
                        for filter_key, filter_obj in list(filters.items()):
                            if filter_obj.expire_time and filter_obj.expire_time < obj.timestamp:
                                del filters[filter_key]

                    obj_filtered = any(
                        [f.eval_filter(obj) for f in filters.values()])

                    if isinstance(obj, DbEventsFilter):
                        if not obj.clear_filter:
                            filters[obj.id] = obj
                        else:
                            object_filter = filters.get(obj.id, None)
                            if object_filter is None:
                                filters[obj.id] = obj
                            else:
                                filters[obj.id].expire_time = obj.expire_time
                            if not obj.expire_time:
                                del filters[obj.id]

                    obj_filtered = obj_filtered or isinstance(obj, SystemEvent)
                    if not obj_filtered:
                        yield obj.__class__.__name__, obj
        except (KeyboardInterrupt, SystemExit) as ex:
            LOGGER.debug("%s - subscribe_events was halted by %s",
                         current_process().name, ex.__class__.__name__)
        socket.close()

    def publish_event(self, event):
        context = zmq.Context()
        socket = context.socket(zmq.PUB)  # pylint: disable=no-member
        socket.connect("tcp://localhost:%d" % self.pub_port.value)
        time.sleep(0.01)

        socket.send_pyobj(event)
        with open(self.raw_events_filename, 'a+') as log_file:
            log_file.write(event.to_json() + '\n')
        socket.close()
        return True

    @retrying(n=3, sleep_time=0, allowed_exceptions=TimeoutError)
    def publish_event_guaranteed(self, event):
        client_socket = self.get_client_socket()
        self.publish_event(event)
        end_time = time.time() + 2
        while time.time() < end_time:
            # This loop makes sure we do not stop on the very first event received,
            # which might not be the event we are looking for but something generated by another thread.
            try:
                if not client_socket.poll(timeout=1):
                    continue
                received_event = client_socket.recv_pyobj(flags=1)
                if event == received_event:
                    return True
            except zmq.ZMQError:
                continue
        raise TimeoutError(f"Event {str(self)} was not delivered")
Exemplo n.º 59
0
from multiprocessing import Process, Manager, Event, RLock, Value, Array


def sharedf(n, a, ev):
    # assumed reconstruction: the original worker body is missing from the listing;
    # it only needs to mutate the shared Value/Array and then signal the main process
    n.value = 3.1415
    for i in range(len(a)):
        a[i] = -a[i]
    ev.set()  # wake up main


def managerf(d, l, ev):
    # the def line and the imports above are an assumed reconstruction of the truncated listing
    d['2'] = 2
    d[0.25] = None
    l.reverse()
    ev.set()  # wake up main


if __name__ == '__main__':
    event = Event()  #flag is false
    lock = RLock()
    #Data can be stored in a shared memory map using Value or Array
    num = Value('d', 0.0)  # typecode from the array module, e.g. 'i' (int), 'd' (double), 'l' (long)
    arr = Array('i', range(10))
    p1 = Process(target=sharedf, args=(num, arr, event))
    p1.start()
    event.wait()  #or call event.is_set()
    with lock:
        print(num.value)
        print(arr[:])

    #Manager
    #controls a server process which holds Python objects
    #and allows other processes to manipulate them using proxies
    #supports list, dict, Namespace,
    #Lock, RLock, Semaphore, BoundedSemaphore, Condition, Event, Queue,
    #Value and Array
    manager = Manager()
    event2 = manager.Event()
    d = manager.dict()
    l = manager.list(range(10))
    p2 = Process(target=managerf, args=(d, l, event2))
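    # Assumed continuation -- the original listing is cut off after creating p2.
    p2.start()
    event2.wait()        # managerf() sets the event when it is done
    with lock:
        print(d)         # manager dict as modified by managerf()
        print(l)         # manager list, reversed by managerf()
    p1.join()
    p2.join()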
Exemplo n.º 60
0
class WriteBack(object):

    _changed = False
    _process = None
    chunk_size = 100

    def __init__(self, db, storage):
        self._db = db
        self._storage = storage

    def close(self):
        if self._process:
            self._stop.set()
            self._event.set()
            self._process.join()

    def changed(self):
        self._changed = True

    def committed(self):
        if self._changed:
            self._changed = False
            if self._process:
                self._event.set()
            else:
                if FORK:
                    from multiprocessing import Process, Event
                else:
                    from threading import Thread as Process, Event
                self._event = Event()
                self._idle = Event()
                self._stop = Event()
                self._np = 1 + self._db._getMaxPartition()
                self._db = cPickle.dumps(self._db, 2)
                self._process = Process(target=self._run)
                self._process.daemon = True
                self._process.start()

    @property
    def wait(self):
        # For unit tests.
        return self._idle.wait

    def _run(self):
        util.setproctitle('neostorage: write back')
        self._db = cPickle.loads(self._db)
        try:

            @self._db.autoReconnect
            def _():
                # Unfortunately, copyTransactionsFrom does not abort in case
                # of failure, so we have to reopen.
                zodb = storageFromString(self._storage)
                try:
                    self.min_tid = util.add64(zodb.lastTransaction(), 1)
                    zodb.copyTransactionsFrom(self)
                finally:
                    zodb.close()
        finally:
            self._idle.set()
            self._db.close()

    def iterator(self):
        db = self._db
        np = self._np
        offset_list = xrange(np)
        while 1:
            with db:
                # Check the partition table at the beginning of every
                # transaction. Once the import is finished and at least one
                # cell is replicated, it is possible that some cells of this
                # node get outdated. In this case, wait for the next PT change.
                if np == len(db._readable_set):
                    while 1:
                        tid_list = []
                        max_tid = MAX_TID
                        for offset in offset_list:
                            x = db.getReplicationTIDList(
                                self.min_tid, max_tid, self.chunk_size, offset)
                            tid_list += x
                            if len(x) == self.chunk_size:
                                max_tid = x[-1]
                        if not tid_list:
                            break
                        tid_list.sort()
                        for tid in tid_list:
                            if self._stop.is_set():
                                return
                            yield TransactionRecord(db, tid)
                            if tid == max_tid:
                                break
                        else:
                            self.min_tid = util.add64(tid, 1)
                            break
                        self.min_tid = util.add64(tid, 1)
            if not self._event.is_set():
                self._idle.set()
                self._event.wait()
                self._idle.clear()
            self._event.clear()
            if self._stop.is_set():
                break
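The _event/_idle/_stop trio above implements a wake-up/acknowledge handshake between the storage and the write-back worker. A stripped-down sketch of that handshake, with illustrative names that are not NEO's API:

from threading import Thread, Event
import time


class WakeUpLoop(object):
    """Minimal version of the wake/idle/stop handshake used by WriteBack above."""

    def __init__(self):
        self._event = Event()   # "there is new work" flag
        self._idle = Event()    # set while the worker is waiting for work
        self._stop = Event()    # ask the worker to exit
        self._thread = Thread(target=self._run)
        self._thread.daemon = True
        self._thread.start()

    def _run(self):
        while True:
            if not self._event.is_set():
                self._idle.set()
                self._event.wait()
                self._idle.clear()
            self._event.clear()
            if self._stop.is_set():
                break
            print("processing one batch of work")

    def notify(self):            # ~ committed(): new data to write back
        self._event.set()

    def close(self):             # ~ close(): stop and join the worker
        self._stop.set()
        self._event.set()
        self._thread.join()


if __name__ == '__main__':
    loop = WakeUpLoop()
    loop.notify()
    time.sleep(0.1)
    loop.close()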