Example #1
    def test_term_thread(self):
        """ctx.term should not crash active threads (#139)"""
        ctx = zmq.Context()

        def block():
            s = ctx.socket(zmq.REP)
            s.bind_to_random_port("tcp://127.0.0.1")
            try:
                s.recv()
            except zmq.ZMQError:
                e = sys.exc_info()[1]
                if e.errno == zmq.ETERM:
                    # context terminated, this is supposed to happen
                    pass
                else:
                    raise
            s.close()

        t = Thread(target=block)
        t.start()
        if sys.version[:3] == "2.5":
            t.is_alive = t.isAlive
        ctx.term()
        t.join(timeout=1)
        self.assertFalse(t.is_alive(), "term should have interrupted s.recv()")
Example #2
def start_forwarder(conn, addr):
    proxy_addr = random_proxy()
    try:
        s = try_connect(proxy_addr)
    except Exception:
        print('#conn failed')
        sleep(0.1)
        try:
            s = try_connect(proxy_addr)
        except Exception:
            #return_proxy(proxy_addr)
            return False
    print('#conn start')
    # Pass the callable and its arguments separately; calling
    # data_forward_func(conn, s) inline would execute it in this thread
    # instead of handing it to the new one.
    t1 = Thread(target=data_forward_func, args=(conn, s))
    t2 = Thread(target=data_forward_func, args=(s, conn))
    t1.start()
    t2.start()
    while True:
        if t1.is_alive() and t2.is_alive():
            sleep(0.5)
        else:
            break
    try:
        s.close()
        conn.close()
    except Exception as e:
        print('#close error: ', e)
    print('##conn end')
    return_proxy(proxy_addr)
    return True
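A data_forward_func helper is assumed but not shown above; a minimal sketch, inferring the name and behavior purely from the call sites:

def data_forward_func(src, dst):
    # Hypothetical helper inferred from the call sites above: copy bytes
    # from src to dst until either end closes the connection.
    while True:
        try:
            data = src.recv(4096)
        except OSError:
            break
        if not data:
            break
        dst.sendall(data)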
Example #3
    def run(self, input_fname, ids_per_job, stagger=0, **wait_params):
        """Run this submission all the way.

        This method will run both `submit_reading` and `watch_and_wait`,
        blocking on the latter.
        """

        submit_thread = Thread(target=self.submit_reading,
                               args=(input_fname, 0, None, ids_per_job),
                               kwargs={'stagger': stagger},
                               daemon=True)
        submit_thread.start()
        try:
            logger.info("Waiting for just a sec...")
            sleep(1)
            wait_params['wait_for_first_job'] = True
            wait_params['kill_on_exception'] = True
            self.watch_and_wait(**wait_params)
            submit_thread.join(0)
            if submit_thread.is_alive():
                logger.warning("Submit thread is still running even after job "
                               "completion.")
        except BaseException as e:
            logger.error("Watch and wait failed...")
            logger.exception(e)
        finally:
            logger.info("Aborting jobs...")
            # Send a signal to the submission loop (on a thread) to stop.
            self.running = False
            submit_thread.join()
            print(submit_thread.is_alive())

        self.running = None
        return submit_thread
Example #4
class Timer:
    def __init__(self, delay, target=None):
        if target is not None:
            self.run = target

        self.delay = delay

        self.running = False
        self._t = Thread()

    def _run(self):
        while self.running:
            self.run()
            time.sleep(self.delay)

    def start(self):
        if self._t.is_alive() and not self.running:
            self._t.join()

        if not self._t.is_alive():
            self.running = True
            self._t = Thread(target=self._run)
            self._t.start()

    def pause(self):
        self.running = False
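A short usage sketch for the Timer above; the heartbeat callback is purely illustrative:

import time

def heartbeat():
    print('tick')

timer = Timer(1.0, target=heartbeat)  # run heartbeat() roughly once per second
timer.start()
time.sleep(3)
timer.pause()  # stops the loop; start() may be called again later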
Example #5
    def test_direct(self):

        def send_nonblock():
            session = self.publish.direct.get("/test/direct1", {
                                              "type": "direct1",
                                              "message": "hi"},
                                              block=False)
            self.session.resolve(session["message"].id, None)
        thread = Thread(target=send_nonblock, args=())
        thread.daemon = True
        thread.start()
        sleep(0.5)
        self.assertEqual(len(self.session.session_list), 1)
        for session in self.session.session_list.itervalues():
            session["status"] = Status.SENT
            session["is_published"].set()
        thread.join(0.5)
        self.assertFalse(thread.is_alive())

        def send_block():
            self.publish.direct.get("/test/direct2",
                                    {"type": "direct2", "message": "hi"},
                                    block=True)
        thread = Thread(target=send_block, args=())
        thread.daemon = True
        thread.start()
        sleep(0.5)
        self.assertEqual(len(self.session.session_list), 1)
        session = self.session.session_list.values()[0]
        self.session.resolve(session["message"].id, session["mid"])
        thread.join(0.5)
        self.assertFalse(thread.is_alive())
Example #6
    def test_don_quixote_dictionary(self):
        archive_file, output_file = gen_archive_output_pair(
            'don_quixote_dictionary')

        don = DonQuixote(
            blueprints={"blueprints": [
                {
                    "service": "cli_emitter",
                    "args": "-f "
                            "test_data/inputs/don_quixote_dictionary._input"
                            " --output_sock_url tcp://*:9997 -d 0"
                },
                {
                    "service": "cli_listener",
                    "args": "-f "
                            "test_out/don_quixote_dictionary._output"
                            " --input_sock_url tcp://localhost:9997"
                }
            ]},
            disable_keyboard=True)
        assert don

        t = Thread(target=don.run)
        t.start()
        time.sleep(2)
        assert t.is_alive()
        don.kill()
        t.join(2)
        assert not t.is_alive()

        self.assertFiles(archive_file, output_file)
Example #7
        def test_record_lock(self):
            shlock = SHLock()
            lock = RecordLock(shlock)

            shlock.acquire()
            self.assertRaises(LockingError, lock.lock, 1)
            shlock.release()
            with lock.lock(1):
                with lock.lock(1):
                    pass

            def dolock():
                with lock.lock(1):
                    time.sleep(0.1)

            t = Thread(target=dolock)
            t.daemon = True
            with lock.lock(1):
                t.start()
                t.join(0.2)
                self.assertTrue(t.is_alive())
            t.join(0.11)
            self.assertFalse(t.is_alive())

            t = Thread(target=dolock)
            t.daemon = True
            with lock.lock(2):
                t.start()
                t.join(0.11)
                self.assertFalse(t.is_alive())
Example #8
  def test_z_threaded_real_world_run(self):
    uid = 1
    jid = 5
    key='stage'
    new_value='done'
    mock_job_data = dict(zip(self.dummy_rtd.valid_keys, self.dummy_rtd.default_values))

    # Setup and verify precondition
    self.dummy_rtd.data = {uid:{jid:mock_job_data}}
    res = self.dummy_rtd.getJob(uid, jid)
    self.assertDictEqual(mock_job_data, res)

    t = Thread(target=self.dummy_rtd.set_attribute, args=(uid, jid, key, new_value))

    self.dummy_rtd.data_lock.acquire()
    t.start()
    time.sleep(1)
    self.assertTrue(t.is_alive())
    self.dummy_rtd.data_lock.release()
    t.join()
    self.assertFalse(t.is_alive())

    # Verify change
    mock_job_data[key]=new_value
    self.assertDictEqual(mock_job_data, self.dummy_rtd.data[uid][jid])
Example #9
def request(method, url, direct_access, argv, app=None):
    global ASGI_APPLICATION
    ret = []
    if direct_access and ASGI_APPLICATION:
        post = (method == 'post')
        h = argv['headers']
        headers = []
        for key, value in h.items():
            headers.append((key.encode('utf-8'), value.encode('utf-8')))
        cookies = ""
        if 'cookies' in argv:
            for key, value in argv['cookies'].items():
                value2 = value.split(';',1)[0]
                cookies += f"{key}={value2};"
        if cookies:
            headers.append((b"cookie", cookies.encode('utf-8')))

        # POST and GET differ only in the request body, so build it once.
        data = argv['data'] if post else {}
        t = Thread(target=asgi_get_or_post,
                   args=(ASGI_APPLICATION, url.replace('http://127.0.0.2', ''),
                         headers, data, post, ret),
                   daemon=True)
        t.start()
        if app:
            try:
                while t.is_alive():
                    app.Yield()
            except:
                t.join()
        else:
            t.join()
        return RetHttp(url, ret[0])
    else:
        if app:
            t = Thread(target=requests_request,
                       args=(method, url, argv, ret),
                       daemon=True)
            t.start()
            try:
                while t.is_alive():
                    app.Yield()
            except:
                t.join()
        else:
            requests_request(method, url, argv, ret)
        return ret[0]
Example #10
class Ntp(object):

    def __init__(self, model):
        self.model = model
        self._ntp_deamon_event = Event()
        self._ntp_thread = Thread(target=self._ntp_update)
        self._ntp_thread.daemon = True
        if self.model.db["ntp"]["enable"] is True:
            NtpDate(self.model.db["ntp"]["server"])
            self.start()

    def update(self, config):
        # Update config
        self.model.db["ntp"] = dict(
            self.model.db["ntp"].items() + config.items())

        # Restart the NTP daemon if enabled; otherwise stop it.
        self.stop()
        if self.model.db["ntp"]["enable"] is True:
            NtpDate(self.model.db["ntp"]["server"])
            self.start()

        self.model.save_db()
        return True

    def stop(self):
        _logger.debug("stop ntp daemon")
        if self._ntp_thread.is_alive():
            self._ntp_deamon_event.set()
            self._ntp_thread.join()
            # reinitialize Thread Object
            self._ntp_deamon_event.clear()
            self._ntp_thread = Thread(target=self._ntp_update)
            self._ntp_thread.daemon = True
            return True
        return False

    def start(self):
        _logger.debug("start ntp daemon")
        if self._ntp_thread.is_alive():
            raise RuntimeError("Stop previous ntp daemon first.")

        self._ntp_thread.start()

    def _ntp_update(self):
        prev_time = time()
        while not self._ntp_deamon_event.is_set():
            time_diff = math.fabs(prev_time - time())
            if time_diff < self.model.db["ntp"]["interval"]:
                sleep(1)
                continue

            try:
                NtpDate(self.model.db["ntp"]["server"])
            except Exception as e:
                _logger.warning(e)
            finally:
                prev_time = time()
Example #11
class EditDistanceBank():
    def __init__(self, words):
        self.words = words
        self.mapping_table = self._mapping_names(words)
        self.size = len(words)
        self.bank = np.zeros((self.size, self.size), dtype=np.int8)
        self.build_bank_thread = Thread(target=self._build_bank, name='Build editdistance bank')
        self.build_bank_thread.start()
    
    def get_max_distance(self):
        if self.build_bank_thread.is_alive():
            raise EditDistanceBank.InProgressError()
        return self.max_distance
        
    def _mapping_names(self, words):
        # Map each word to its row/column index in the bank.
        return {word: idx for idx, word in enumerate(words)}
    
    def _build_bank(self):
        max_value = 0
        for i in range(self.size - 1):
            for j in range(i + 1, self.size):
                if self.words[i] == '' and self.words[j] == '':
                    self.bank[i][j] = 1
                    self.bank[j][i] = 1
                else:
                    editdistance = levenshtein_dyn(self.words[i], self.words[j])
                    self.bank[i][j] = editdistance
                    self.bank[j][i] = editdistance
                    if max_value < editdistance:
                        max_value = editdistance
        self.max_distance = max_value
        return max_value

    def lookup(self, word1, word2):
        if self.build_bank_thread.is_alive():
            raise EditDistanceBank.InProgressError()
        idx1 = self.mapping_table[word1]
        idx2 = self.mapping_table[word2]
        return self.bank[idx1][idx2]
    
    def join(self):
        if self.build_bank_thread.is_alive():
            self.build_bank_thread.join()

    class InProgressError(Exception):
        def __init__(self):
            pass
            
        def __repr__(self):
            return 'Building edit distance bank in progress, please wait'
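A brief usage sketch for the bank above; it assumes levenshtein_dyn and numpy are importable as in the class definition:

bank = EditDistanceBank(['kitten', 'sitting', 'sit'])
bank.join()  # block until the background build finishes
print(bank.lookup('kitten', 'sitting'))  # 3
print(bank.get_max_distance())  # 4, the largest pairwise distance here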
Example #12
 def test_eval_multithreading(self):
     ev = Event()
     self.ctx.g.func = ev.wait
     t = Thread(target=self.ctx.eval, args=("func()",))
     t.daemon = True
     t.start()
     t.join(0.01)
     self.assertTrue(t.is_alive())
     ev.set()
     t.join(1)
     self.assertFalse(t.is_alive())
Example #13
def decode_file(fname, res):
    if interface is not None:
        try:
            p = pcap.pcapObject()
            net, mask = pcap.lookupnet(interface)
            p.open_live(interface, 1600, 0, 100)
            Message = " live capture started, using:%s\nStarting timestamp (%s) corresponds to %s" % (interface, time.time(), time.strftime('%x %X'))
            print Message
            #l.warning(Message)
            while 1:
                p.dispatch(1, Print_Packet_Tcpdump)
        except (KeyboardInterrupt, SystemExit):
            print '\n\nCTRL-C hit...\nCleaning up...'
            sys.exit()
    else:
        try:
            p = pcap.pcapObject()
            p.open_offline(fname)
            #l.warning('\n\n started, using:%s file'%(fname))
            Version = IsCookedPcap(res)
            if Version in (1, 2, 3):
                # Versions 1 and 2 are cooked captures; version 3 is raw
                # tcpdump output. The dispatch loop is otherwise identical.
                handler = Print_Packet_Tcpdump if Version == 3 else Print_Packet_Cooked
                thread = Thread(target=p.dispatch, args=(0, handler))
                thread.daemon = True
                thread.start()
                try:
                    while thread.is_alive():
                        thread.join(timeout=1)
                except (KeyboardInterrupt, SystemExit):
                    print '\n\nCTRL-C hit..Cleaning up...'
        except Exception:
            print 'Can\'t parse %s' % (fname)
Example #14
 def test_term_noclose(self):
     """Context.term won't close sockets"""
     ctx = self.Context()
     s = ctx.socket(zmq.REQ)
     self.assertFalse(s.closed)
     t = Thread(target=ctx.term)
     t.start()
     t.join(timeout=0.1)
     self.assertTrue(t.is_alive(), "Context should be waiting")
     s.close()
     t.join(timeout=0.1)
     self.assertFalse(t.is_alive(), "Context should have closed")
Example #15
 def test_gc(self):
     """test close&term by garbage collection alone"""
     # test credit @dln (GH #137):
     def gc():
         ctx = zmq.Context()
         s = ctx.socket(zmq.PUSH)
     t = Thread(target=gc)
     t.start()
     t.join(timeout=1)
     if sys.version[:3] == '2.5':
         t.is_alive = t.isAlive
     self.assertFalse(t.is_alive(), "Garbage collection should have cleaned up context")
Example #16
  def test_find_or_upsert(self):
    class UpsertTest(ModelBase):
      table_name = "upserts"
      attrs = { "name": None, "type": None }

    def insert(ma, name, type):
      return ma.insert_model(UpsertTest(name=name, type=type))

    def upsert(ma, name, type, assign):
      with transaction(ma):
        assign.append(ma.find_or_upsert(UpsertTest, dict(name=name, type=type), comp=dict(name=name), return_status=True))

    with get_model_access() as ma1, get_model_access() as ma2:
      with autocommit(ma1):
        ma1.execute("drop table if exists upserts")
        ma1.execute("create table upserts (id serial not null primary key, name text not null unique, type text not null, created_at timestamptz not null default now(), updated_at timestamptz not null default now());")
        ma1.execute("truncate table upserts")

      # 1) Two transactions: a) create b) upsert a) commit - check that a and b have same id
      # 2) Two transactions: a) create b) upsert a) rollback - check that a and b have different ids

      with transaction(ma1):
        mod1 = insert(ma1, "Trey", "person")
        mod2a = []
        thread1 = Thread(target=lambda: upsert(ma2, "Trey", "person", mod2a))
        thread1.start()
        sleep(0.25)
        self.assertTrue(thread1.is_alive())
      thread1.join()
      mod2, mod2_status = mod2a[0]
      self.assertEqual(mod2_status, "duplicate")
      self.assertEqual(mod1.id, mod2.id)

      with transaction(ma1):
        mod3 = insert(ma1, "Julie", "person")
        mod4a = []
        thread2 = Thread(target=lambda: upsert(ma2, "Julie", "person", mod4a))
        thread2.start()
        sleep(0.25)
        self.assertTrue(thread2.is_alive())
        raise RollbackTransaction()
      thread2.join()
      mod4, mod4_status = mod4a[0]
      self.assertEqual(mod4_status, "created")
      self.assertNotEqual(mod3.id, mod4.id)

      mod5a = []
      upsert(ma1, "Trey", "person", mod5a)
      mod5, mod5_status = mod5a[0]
      self.assertEqual(mod5_status, "found")
      self.assertEqual(mod5.id, mod1.id)
Example #17
def handleProcessOutputBytewise(proc, fhOut, fhErr):
	q = Queue()
	errQ = Queue()
	
	t = Thread(target=enqueue_output, args=(proc.stdout, q))
	t.daemon = True # thread dies with the program
	t.start()
	
	tErr = Thread(target=enqueue_output, args=(proc.stderr, errQ))
	tErr.daemon = True # thread dies with the program
	tErr.start()
	
	writeEveryXSecs = 1.0 / 10
	lastWrite = time.time()
	
	chars = []
	errChars = []
	
	while 1:
		try:
			char = q.get(timeout=.1)
		except Empty:
			pass
		else:  # got a single character
			chars.append(char)
		
		if not errQ.empty():
			try:
				while 1:
					errChar = errQ.get_nowait()
					errChars.append(errChar)
			except Empty:
				pass
		
		if time.time() - lastWrite > writeEveryXSecs:
			if chars:
				fhOut(''.join(chars))
				del chars[:]
				
			if errChars:
				fhErr(''.join(errChars))
				del errChars[:]
			
			QtGui.QApplication.instance().processEvents()
			
			# Process terminated?
			exitCode = proc.poll()
			
			if (exitCode is not None) and (not t.is_alive()) and (not tErr.is_alive()) and q.empty() and errQ.empty():
				return exitCode
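handleProcessOutputBytewise relies on an enqueue_output helper that is not shown; a plausible sketch, reading one character at a time to match the byte-wise design above:

def enqueue_output(stream, queue):
	# Hypothetical reader matching the usage above: push single characters
	# from the pipe onto the queue until EOF, then close the pipe.
	for char in iter(lambda: stream.read(1), ''):
		queue.put(char)
	stream.close()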
Example #18
 def tearDown(self):
     contexts = set([self.context])
     while self.sockets:
         sock = self.sockets.pop()
         contexts.add(sock.context) # in case additional contexts are created
         sock.close()
     for ctx in contexts:
         t = Thread(target=ctx.term)
         t.start()
         t.join(timeout=2)
         if sys.version[:3] == '2.5':
             t.is_alive = t.isAlive
         if t.is_alive():
             raise RuntimeError("context could not terminate, open sockets likely remain in test")
Example #19
 def get_output(self, proc_pid, timeout=180):
     result_qu = Queue.Queue()
     comm_timer = Thread(target=self._communicate, args=(proc_pid, result_qu))
     comm_timer.start()
     comm_timer.join(timeout)
     if comm_timer.is_alive():
         self.terminate(proc_pid)
         out, err = (None, 'SIGTERM')
         # Give the worker a chance to finish after SIGTERM; without this
         # join the is_alive() check below races the terminate call.
         comm_timer.join(timeout)
         if comm_timer.is_alive():
             self.terminate(proc_pid, kill=True)
             out, err = (None, 'SIGKILL')
     else:
         out, err = result_qu.get()
     return out, err
Example #20
    def test_info_advances(self):
        info = game.info(self.r, 'blocked')
        info.next()  # pull out the not_exists info

        t = Thread(target=info.next)
        t.start()

        self.assertTrue(t.is_alive())
        time.sleep(0.5)
        self.assertTrue(t.is_alive())

        game.join(self.r, 'blocked', 'some dude')

        t.join(1)
        self.assertFalse(t.is_alive())
Example #21
class PseudoSocketServer(Server, PseudoSide):

    def __init__(self, file_path):
        if os.path.exists(file_path):
            os.remove(file_path)
        self._file_path = file_path
        self._listening = False
        self._async_listener = Thread(target=self._listen_task)

    def _listen_task(self):
        print("[server] Connects by {!s}".format(self._file_path))
        open(self._file_path, 'a').close()
        self._listening = True
        while self._listening:
            received = self.read_and_remove(self._file_path, "server")
            if received:
                print("[server] Server got message")
                answer = self.HELLO_FORMAT.format(received)
                self.append_to_file(self._file_path, answer, "server")
            time.sleep(0.25)

    def listen(self):
        self._async_listener.start()

    def close(self):
        print("[server] Shutdowns socket")
        self._listening = False

        if self._async_listener.is_alive():
            print("[server] Joins background task")
            self._async_listener.join()
Example #22
    def test_term_thread(self):
        """ctx.term should not crash active threads (#139)"""
        ctx = self.Context()
        evt = Event()
        evt.clear()

        def block():
            s = ctx.socket(zmq.REP)
            s.bind_to_random_port('tcp://127.0.0.1')
            evt.set()
            try:
                s.recv()
            except zmq.ZMQError as e:
                self.assertEqual(e.errno, zmq.ETERM)
                return
            finally:
                s.close()
            self.fail("recv should have been interrupted with ETERM")
        t = Thread(target=block)
        t.start()
        
        evt.wait(1)
        self.assertTrue(evt.is_set(), "sync event never fired")
        time.sleep(0.01)
        ctx.term()
        t.join(timeout=1)
        self.assertFalse(t.is_alive(), "term should have interrupted s.recv()")
Example #23
class SimpleServer(object):
    """Helper class which starts a simple server listening on localhost at the specified port
    """

    def __init__(self):
        self.port = find_available_port()
        self.handler = SimpleHTTPServer.SimpleHTTPRequestHandler
        self.httpd = SocketServer.TCPServer(("", self.port), self.handler)
        self.close_signal = threading.Event()
        self.server_started = False

    def start(self, delay_sec=0.0):
        """Run the server after specified delay"""
        def run():
            self.close_signal.wait(delay_sec)

            if not self.close_signal.is_set():
                self.server_started = True
                self.httpd.serve_forever()

        self.background_thread = Thread(target=run)
        self.background_thread.start()

    def stop(self):
        self.close_signal.set()

        if self.server_started:
            self.httpd.shutdown()
        self.background_thread.join(timeout=.5)
        if self.background_thread.is_alive():
            raise Exception("SimpleServer failed to stop quickly")
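A usage sketch for SimpleServer; find_available_port and the served content come from the surrounding test helpers:

server = SimpleServer()
server.start(delay_sec=0.5)  # serve_forever() begins after the delay
# ... issue requests against http://localhost:<server.port> here ...
server.stop()  # raises if the background thread lingers past 0.5s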
Example #24
def Command(*cmd, **kwargs):
    """Enables to run subprocess commands in a different thread with
    TIMEOUT option!

    Based on jcollado's solution:
    http://stackoverflow.com/questions/1191374/subprocess-with-timeout/4825933#4825933
    and  https://gist.github.com/1306188

    """
    if kwargs.has_key("timeout"):
        timeout = kwargs["timeout"]
        del kwargs["timeout"]
    else:
        timeout = None

    process = []

    def target(process,out,*cmd,**k):
        process.append(subprocess.Popen(cmd,stdout=subprocess.PIPE,**k))
        out.put(process[0].communicate()[0])

    outQueue = Queue()
    args = [process,outQueue]
    args.extend(cmd)
    thread = Thread(target=target, args=args, kwargs=kwargs)
    thread.start()
    thread.join(timeout)
    if thread.is_alive():
        process[0].terminate()
        thread.join()
        raise Empty
    return outQueue.get()
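A usage sketch for Command; it raises Empty when the subprocess outlives the timeout:

try:
    output = Command('sleep', '5', timeout=1)  # terminated after 1 second
except Empty:
    output = None
print(output)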
Example #25
    def handle_noargs(self, **options):
        verbose = int(options['verbosity'])
        send_all = True

        if verbose:
            print 'Starting to send newsletters...'

        activate(settings.LANGUAGE_CODE)

        senders = SMTPServer.objects.all()
        workers = []

        for sender in senders:
            worker = SMTPMailer(sender, verbose=verbose)
            thread = Thread(target=functools.partial(worker.run, send_all), name=sender.name)
            workers.append((worker, thread))

        handler = term_handler(workers)
        for s in [signal.SIGTERM, signal.SIGINT]:
            signal.signal(s, handler)

        # first close current connection
        signals.request_finished.send(sender=self.__class__)

        for worker, thread in workers:
            thread.start()

        signal.pause()  # wait for sigterm

        for worker, thread in workers:
            if thread.is_alive():
                thread.join()

        sys.exit(0)
Example #26
class Worker(object):
    def __init__(self, name, unit_of_work, args=(), kwargs=None):
        self.name = name
        self.unit_of_work = unit_of_work
        self.args = args
        # Avoid a mutable default argument; each Worker gets its own dict.
        self.kwargs = kwargs if kwargs is not None else {}
        self.restarts = 0
        self.thread = None

    def start(self):
        self.thread = Thread(target=self.unit_of_work.start,
                             args=self.args, kwargs=self.kwargs)
        self.thread.daemon = True
        self.thread.start()

    def is_alive(self):
        # If the unit of work has been updated in the last 30 seconds, it's alive.
        update_interval = datetime.now() - self.unit_of_work.last_update()
        thread_still_updating = update_interval.total_seconds() < 30

        return thread_still_updating and self.thread.is_alive()

    def check(self):
        if self.is_alive():
            return
        
        self.start()

        self.restarts += 1
        print('{0} restarted {1} time(s)'.format(self.name, self.restarts))

    def stop(self):
        self.unit_of_work.stop()
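A supervisor-loop sketch for the Worker above; ingest_unit is a hypothetical object exposing start(), stop(), and last_update():

import time

worker = Worker('ingest', ingest_unit)  # ingest_unit is hypothetical
worker.start()
while True:
    worker.check()  # restarts the worker if it stopped updating or died
    time.sleep(5)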
Example #27
class TwistedLoop(object):

    _lock = None
    _thread = None

    def __init__(self):
        self._lock = Lock()

    def maybe_start(self):
        with self._lock:
            if not reactor.running:
                self._thread = Thread(target=reactor.run,
                                      name="cassandra_driver_event_loop",
                                      kwargs={'installSignalHandlers': False})
                self._thread.daemon = True
                self._thread.start()
                atexit.register(partial(_cleanup, weakref.ref(self)))

    def _cleanup(self):
        if self._thread:
            reactor.callFromThread(reactor.stop)
            self._thread.join(timeout=1.0)
            if self._thread.is_alive():
                log.warning("Event loop thread could not be joined, so "
                            "shutdown may not be clean. Please call "
                            "Cluster.shutdown() to avoid this.")
            log.debug("Event loop thread was joined")
Example #28
class Console(ConnectionListener):
	def __init__(self, host, port):
		self.hostname = host
		self.Connect((host, port))
		print "I8BP console started"
		# fetch the player ID from infinite platformer config
		self.playerid = config.Get("playerID", default=None)
		print "using PlayerID:", self.playerid
		# pass results back to the input thread
		self.q = Queue()
		# launch our threaded input loop
		self.t = Thread(target=self.InputLoop, args=(self.q,))
		self.t.daemon = True
		self.t.start()
		# initiate the connection with our ID
		connection.Send({"action": "admin", "id": self.playerid})
	
	def Loop(self):
		connection.Pump()
		self.Pump()
		if not self.t.is_alive():
			sys.exit()
	
	def InputLoop(self, q):
		console = HistoryConsole(locals())
		# horrid threaded input loop
		# continually reads from stdin and sends whatever is typed to the server
		quit = False
		while not quit:
			print q.get(block=True),
			try:
				sendcommand = console.raw_input("I8BP:" + self.hostname + "> ")
				connection.Send({"action": "console", "command": sendcommand, "id": self.playerid})
			except EOFError:
				print
				quit = True
	
	#######################################
	### Network event/message callbacks ###
	#######################################
	
	def Network_result(self, data):
		self.q.put(data['result'])
	
	def Network_permission(self, data):
		print "Permission error: ", data['permission']
		exit()
	
	# built in stuff

	def Network_connected(self, data):
		self.q.put("You are now connected to the server\n")
	
	def Network_error(self, data):
		print 'error:', data['error'][1]
		exit()
	
	def Network_disconnected(self, data):
		print 'Server disconnected\n'
		exit()
Example #29
def checkTimeOutPut(args):
    global currCommandProcess
    global stde
    global stdo
    stde = None
    stdo = None
    def executeCommand():
        global currCommandProcess
        global stdo
        global stde
        try:
            stdo, stde = currCommandProcess.communicate()
            printLog('stdout:\n'+str(stdo))
            printLog('stderr:\n'+str(stde))
        except:
            printLog("ERROR: UNKNOWN Exception - +checkWinTimeOutPut()::executeCommand()")

    currCommandProcess = subprocess.Popen(args,stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    thread = Thread(target=executeCommand)
    thread.start()
    thread.join(TIMOUT_VAL) #wait for the thread to complete 
    if thread.is_alive():
        printLog('ERROR: Killing the process - terminating thread because it is taking too much time to execute')
        currCommandProcess.kill()
        printLog('ERROR: Timed out exception')
        raise errorHandler.ApplicationException(__file__, errorHandler.TIME_OUT)
    if stdo == "" or stdo==None:
        errCode = currCommandProcess.poll()
        printLog('ERROR: @@@@@Raising Called processor exception')
        raise subprocess.CalledProcessError(errCode, args, output=stde)
    return stdo
Example #30
def run_with_timeout_and_stack(request, timeout):
    '''
    interrupts evaluation after a given time period. provides a suitable stack environment.
    '''

    # only use set_thread_stack_size if max recursion depth was changed via the environment variable
    # MATHICS_MAX_RECURSION_DEPTH. if it is set, we always use a thread, even if timeout is None, in
    # order to be able to set the thread stack size.

    if MAX_RECURSION_DEPTH > settings.DEFAULT_MAX_RECURSION_DEPTH:
        set_thread_stack_size(python_stack_size(MAX_RECURSION_DEPTH))
    elif timeout is None:
        return request()

    queue = Queue(maxsize=1)   # stores the result or exception
    thread = Thread(target=_thread_target, args=(request, queue))
    thread.start()

    thread.join(timeout)
    if thread.is_alive():
        raise TimeoutInterrupt()

    success, result = queue.get()
    if success:
        return result
    else:
        six.reraise(*result)
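run_with_timeout_and_stack depends on a _thread_target helper that is not shown; a minimal sketch consistent with how the (success, result) pair is consumed above:

import sys

def _thread_target(request, queue):
    # Hypothetical worker body: run the request and report either
    # (True, value) or (False, exc_info) through the one-slot queue.
    try:
        queue.put((True, request()))
    except Exception:
        queue.put((False, sys.exc_info()))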
Example #31
class ServiceManager(object):
    """
    Manages the scheduling of services.
    """
    def __init__(self, jobStore, toilState):
        logger.debug("Initializing service manager")
        self.jobStore = jobStore
        
        self.toilState = toilState

        self.jobGraphsWithServicesBeingStarted = set()

        self._terminate = Event() # This is used to terminate the thread associated
        # with the service manager

        self._jobGraphsWithServicesToStart = Queue() # This is the input queue of
        # jobGraphs that have services that need to be started

        self._jobGraphsWithServicesThatHaveStarted = Queue() # This is the output queue
        # of jobGraphs that have services that are already started

        self._serviceJobGraphsToStart = Queue() # This is the queue of services for the
        # batch system to start

        self.jobsIssuedToServiceManager = 0 # The number of jobs the service manager
        # is scheduling

        # Start a thread that starts the services of jobGraphs in the
        # jobsWithServicesToStart input queue and puts the jobGraphs whose services
        # are running on the jobGraphsWithServicesThatHaveStarted output queue
        self._serviceStarter = Thread(target=self._startServices,
                                     args=(self._jobGraphsWithServicesToStart,
                                           self._jobGraphsWithServicesThatHaveStarted,
                                           self._serviceJobGraphsToStart, self._terminate,
                                           self.jobStore))
        
    def start(self): 
        """
        Start the service scheduling thread.
        """
        self._serviceStarter.start()

    def scheduleServices(self, jobGraph):
        """
        Schedule the services of a job asynchronously.
        When the job's services are running the jobGraph for the job will
        be returned by toil.leader.ServiceManager.getJobGraphsWhoseServicesAreRunning.

        :param toil.jobGraph.JobGraph jobGraph: wrapper of job with services to schedule.
        """
        # Add jobGraph to set being processed by the service manager
        self.jobGraphsWithServicesBeingStarted.add(jobGraph)

        # Add number of jobs managed by ServiceManager
        self.jobsIssuedToServiceManager += sum(map(len, jobGraph.services)) + 1 # The plus one accounts for the root job

        # Asynchronously schedule the services
        self._jobGraphsWithServicesToStart.put(jobGraph)

    def getJobGraphWhoseServicesAreRunning(self, maxWait):
        """
        :param float maxWait: Time in seconds to wait to get a jobGraph before returning
        :return: a jobGraph added to scheduleServices whose services are running, or None if
        no such job is available.
        :rtype: JobGraph
        """
        try:
            jobGraph = self._jobGraphsWithServicesThatHaveStarted.get(timeout=maxWait)
            self.jobGraphsWithServicesBeingStarted.remove(jobGraph)
            assert self.jobsIssuedToServiceManager >= 0
            self.jobsIssuedToServiceManager -= 1
            return jobGraph
        except Empty:
            return None

    def getServiceJobsToStart(self, maxWait):
        """
        :param float maxWait: Time in seconds to wait to get a job before returning.
        :return: a tuple of (serviceJobStoreID, memory, cores, disk, ..) representing
        a service job to start.
        :rtype: toil.job.ServiceJobNode
        """
        try:
            serviceJob = self._serviceJobGraphsToStart.get(timeout=maxWait)
            assert self.jobsIssuedToServiceManager >= 0
            self.jobsIssuedToServiceManager -= 1
            return serviceJob
        except Empty:
            return None

    def killServices(self, services, error=False):
        """
        :param dict services: Maps service jobStoreIDs to the communication flags for the service
        """
        for serviceJobStoreID in services:
            serviceJob = services[serviceJobStoreID]
            if error:
                self.jobStore.deleteFile(serviceJob.errorJobStoreID)
            self.jobStore.deleteFile(serviceJob.terminateJobStoreID)
            
    def isActive(self, serviceJobNode):
        """
        Returns true if the service job has not been told to terminate.
        :rtype: boolean
        """
        return self.jobStore.fileExists(serviceJobNode.terminateJobStoreID)

    def isRunning(self, serviceJobNode):
        """
        Returns true if the service job has started and is active
        :rtype: boolean
        """
        return (not self.jobStore.fileExists(serviceJobNode.startJobStoreID)) and self.isActive(serviceJobNode)

    def check(self):
        """
        Check on the service manager thread.
        :raise RuntimeError: If the underlying thread has quit.
        """
        if not self._serviceStarter.is_alive():
            raise RuntimeError("Service manager has quit")

    def shutdown(self):
        """
        Cleanly terminate worker threads starting and killing services. Will block
        until all services are started and blocked.
        """
        logger.debug('Waiting for service manager thread to finish ...')
        startTime = time.time()
        self._terminate.set()
        self._serviceStarter.join()
        # Kill any services still running to avoid deadlock
        for services in list(self.toilState.servicesIssued.values()):
            self.killServices(services, error=True)
        logger.debug('... finished shutting down the service manager. Took %s seconds', time.time() - startTime)

    @staticmethod
    def _startServices(jobGraphsWithServicesToStart,
                       jobGraphsWithServicesThatHaveStarted,
                       serviceJobsToStart,
                       terminate, jobStore):
        """
        Thread used to schedule services.
        """
        servicesThatAreStarting = set()
        servicesRemainingToStartForJob = {}
        serviceToJobGraph = {}
        while True:
            with throttle(1.0):
                if terminate.is_set():
                    logger.debug('Received signal to quit starting services.')
                    break
                try:
                    jobGraph = jobGraphsWithServicesToStart.get_nowait()
                    if len(jobGraph.services) > 1:
                        # Have to fall back to the old blocking behavior to
                        # ensure entire service "groups" are issued as a whole.
                        blockUntilServiceGroupIsStarted(jobGraph,
                                                        jobGraphsWithServicesThatHaveStarted,
                                                        serviceJobsToStart, terminate, jobStore)
                        continue
                    # Found a new job that needs to schedule its services.
                    for serviceJob in jobGraph.services[0]:
                        serviceToJobGraph[serviceJob] = jobGraph
                    servicesRemainingToStartForJob[jobGraph] = len(jobGraph.services[0])
                    # Issue the service jobs all at once.
                    for serviceJob in jobGraph.services[0]:
                        logger.debug("Service manager is starting service job: %s, start ID: %s", serviceJob, serviceJob.startJobStoreID)
                        serviceJobsToStart.put(serviceJob)
                    # We should now start to monitor these services to see if
                    # they've started yet.
                    servicesThatAreStarting.update(jobGraph.services[0])
                except Empty:
                    # No new jobs that need services scheduled.
                    pass

                for serviceJob in list(servicesThatAreStarting):
                    if not jobStore.fileExists(serviceJob.startJobStoreID):
                        # Service has started!
                        servicesThatAreStarting.remove(serviceJob)
                        parentJob = serviceToJobGraph[serviceJob]
                        servicesRemainingToStartForJob[parentJob] -= 1
                        assert servicesRemainingToStartForJob[parentJob] >= 0
                        del serviceToJobGraph[serviceJob]

                # Find if any jobGraphs have had *all* their services started.
                jobGraphsToRemove = set()
                for jobGraph, remainingServices in servicesRemainingToStartForJob.items():
                    if remainingServices == 0:
                        jobGraphsWithServicesThatHaveStarted.put(jobGraph)
                        jobGraphsToRemove.add(jobGraph)
                for jobGraph in jobGraphsToRemove:
                    del servicesRemainingToStartForJob[jobGraph]
Example #32
class DaskRemoteService(object):
    def __init__(self,
                 remote_addr,
                 scheduler_port,
                 ssh_username=None,
                 ssh_port=22,
                 ssh_private_key=None,
                 remote_python=None):

        self.scheduler_addr = remote_addr
        self.scheduler_port = scheduler_port

        self.ssh_username = ssh_username
        self.ssh_port = ssh_port
        self.ssh_private_key = ssh_private_key
        self.remote_python = remote_python
        self.monitor_thread = Thread()

        # Start the scheduler node
        self.scheduler = start_scheduler(
            remote_addr,
            scheduler_port,
            ssh_username,
            ssh_port,
            ssh_private_key,
            remote_python,
        )
        # Start worker nodes
        self.worker = start_worker(
            self.scheduler_addr,
            self.scheduler_port,
            remote_addr,
            self.ssh_username,
            self.ssh_port,
            self.ssh_private_key,
            self.remote_python,
        )
        self.start_monitoring()
        self.status = "live"

    def start_monitoring(self):
        if self.monitor_thread.is_alive():
            return
        self.monitor_thread = Thread(target=self.monitor_remote_processes)
        #self.monitor_thread.daemon = True
        self.monitor_thread.start()

    def monitor_remote_processes(self):
        all_processes = [self.scheduler, self.worker]
        try:
            while True:
                for process in all_processes:
                    while not process["output_queue"].empty():
                        try:
                            msg = process["output_queue"].get()
                            if 'distributed.' in msg:
                                msg = msg.replace('distributed', 'autogluon')

                            print(msg)
                        except Exception as e:
                            print(
                                f'Exception happened {e}, terminating the remote.'
                            )
                            break
                # Kill some time and free up CPU
                time.sleep(0.1)

        except KeyboardInterrupt:
            pass

    def shutdown(self):
        all_processes = [self.worker, self.scheduler]

        for process in all_processes:
            process["input_queue"].put("shutdown")
            process["thread"].join()
        self.status = "closed"
Example #33
class Adder(QObject):

    do_one_signal = pyqtSignal()

    def __init__(self,
                 source,
                 single_book_per_directory=True,
                 db=None,
                 parent=None,
                 callback=None,
                 pool=None,
                 list_of_archives=False):
        if not validate_source(source, parent):
            return
        QObject.__init__(self, parent)
        self.single_book_per_directory = single_book_per_directory
        self.ignore_opf = False
        self.list_of_archives = list_of_archives
        self.callback = callback
        self.add_formats_to_existing = prefs['add_formats_to_existing']
        self.do_one_signal.connect(self.tick, type=Qt.QueuedConnection)
        self.pool = pool
        self.pd = ProgressDialog(_('Adding books...'),
                                 _('Scanning for files...'),
                                 min=0,
                                 max=0,
                                 parent=parent,
                                 icon='add_book.png')
        self.db = getattr(db, 'new_api', None)
        if self.db is not None:
            self.dbref = weakref.ref(db)
        self.source = source
        self.tdir = PersistentTemporaryDirectory('_add_books')
        self.scan_error = None
        self.file_groups = OrderedDict()
        self.abort_scan = False
        self.duplicates = []
        self.report = []
        self.items = []
        self.added_book_ids = set()
        self.merged_books = set()
        self.added_duplicate_info = set()
        self.pd.show()

        self.scan_thread = Thread(target=self.scan, name='ScanBooks')
        self.scan_thread.daemon = True
        self.scan_thread.start()
        self.do_one = self.monitor_scan
        self.do_one_signal.emit()
        if DEBUG:
            self.start_time = time.time()

    def break_cycles(self):
        self.abort_scan = True
        self.pd.close()
        self.pd.deleteLater()
        if self.pool is not None:
            self.pool.shutdown()
        if not self.items:
            shutil.rmtree(self.tdir, ignore_errors=True)
        self.setParent(None)
        self.find_identical_books_data = self.merged_books = self.added_duplicate_info = self.pool = self.items = self.duplicates = self.pd = self.db = self.dbref = self.tdir = self.file_groups = self.scan_thread = None  # noqa
        self.deleteLater()

    def tick(self):
        if self.pd.canceled:
            try:
                if callable(self.callback):
                    self.callback(self)
            finally:
                self.break_cycles()
            return
        self.do_one()

    # Filesystem scan {{{

    def scan(self):

        try:
            compiled_rules = tuple(
                map(compile_rule, gprefs.get('add_filter_rules', ())))
        except Exception:
            compiled_rules = ()
            import traceback
            traceback.print_exc()

        if iswindows or isosx:

            def find_files(root):
                for dirpath, dirnames, filenames in os.walk(root):
                    for files in find_books_in_directory(
                            dirpath,
                            self.single_book_per_directory,
                            compiled_rules=compiled_rules):
                        if self.abort_scan:
                            return
                        self.file_groups[len(self.file_groups)] = files
        else:

            def find_files(root):
                if isinstance(root, type(u'')):
                    root = root.encode(filesystem_encoding)
                for dirpath, dirnames, filenames in os.walk(root):
                    try:
                        dirpath = dirpath.decode(filesystem_encoding)
                    except UnicodeDecodeError:
                        prints('Ignoring non-decodable directory:', dirpath)
                        continue
                    for files in find_books_in_directory(
                            dirpath,
                            self.single_book_per_directory,
                            compiled_rules=compiled_rules):
                        if self.abort_scan:
                            return
                        self.file_groups[len(self.file_groups)] = files

        def extract(source):
            tdir = tempfile.mkdtemp(suffix='_archive', dir=self.tdir)
            if source.lower().endswith('.zip'):
                from calibre.utils.zipfile import ZipFile
                try:
                    with ZipFile(source) as zf:
                        zf.extractall(tdir)
                except Exception:
                    prints('Corrupt ZIP file, trying to use local headers')
                    from calibre.utils.localunzip import extractall
                    extractall(source, tdir)
            elif source.lower().endswith('.rar'):
                from calibre.utils.unrar import extract
                extract(source, tdir)
            return tdir

        try:
            if isinstance(self.source, basestring):
                find_files(self.source)
                self.ignore_opf = True
            else:
                unreadable_files = []
                for path in self.source:
                    if self.abort_scan:
                        return
                    if os.access(path, os.R_OK):
                        if self.list_of_archives:
                            find_files(extract(path))
                            self.ignore_opf = True
                        else:
                            self.file_groups[len(self.file_groups)] = [path]
                    else:
                        unreadable_files.append(path)
                if unreadable_files:
                    if not self.file_groups:
                        m = ngettext(
                            'You do not have permission to read the selected file.',
                            'You do not have permission to read the selected files.',
                            len(unreadable_files))
                        self.scan_error = m + '\n' + '\n'.join(
                            unreadable_files)
                    else:
                        a = self.report.append
                        for f in unreadable_files:
                            a(
                                _('Could not add %s as you do not have permission to read the file'
                                  % f))
                            a('')
        except Exception:
            self.scan_error = traceback.format_exc()

    def monitor_scan(self):
        self.scan_thread.join(0.05)
        if self.scan_thread.is_alive():
            self.do_one_signal.emit()
            return
        if self.scan_error is not None:
            error_dialog(
                self.pd,
                _('Cannot add books'),
                _('Failed to add any books, click "Show details" for more information.'
                  ),
                det_msg=self.scan_error,
                show=True)
            self.break_cycles()
            return
        if not self.file_groups:
            error_dialog(self.pd,
                         _('Could not add'),
                         _('No e-book files were found in %s') % self.source,
                         show=True)
            self.break_cycles()
            return
        self.pd.max = len(self.file_groups)
        self.pd.title = _(
            'Reading metadata and adding to library (%d books)...'
        ) % self.pd.max
        self.pd.msg = ''
        self.pd.value = 0
        self.pool = Pool(name='AddBooks') if self.pool is None else self.pool
        if self.db is not None:
            if self.add_formats_to_existing:
                self.find_identical_books_data = self.db.data_for_find_identical_books(
                )
            else:
                try:
                    self.pool.set_common_data(self.db.data_for_has_book())
                except Failure as err:
                    error_dialog(
                        self.pd,
                        _('Cannot add books'),
                        _('Failed to add any books, click "Show details" for more information.'
                          ),
                        det_msg=as_unicode(err.failure_message) + '\n' +
                        as_unicode(err.details),
                        show=True)
                    self.pd.canceled = True
        self.groups_to_add = iter(self.file_groups)
        self.do_one = self.do_one_group
        self.do_one_signal.emit()

    # }}}

    def do_one_group(self):
        try:
            group_id = next(self.groups_to_add)
        except StopIteration:
            self.do_one = self.monitor_pool
            self.do_one_signal.emit()
            return
        try:
            self.pool(group_id, 'calibre.ebooks.metadata.worker',
                      'read_metadata', self.file_groups[group_id], group_id,
                      self.tdir)
        except Failure as err:
            error_dialog(
                self.pd,
                _('Cannot add books'),
                _('Failed to add any books, click "Show details" for more information.'
                  ),
                det_msg=as_unicode(err.failure_message) + '\n' +
                as_unicode(err.details),
                show=True)
            self.pd.canceled = True
        self.do_one_signal.emit()

    def monitor_pool(self):
        try:
            worker_result = self.pool.results.get(True, 0.05)
            self.pool.results.task_done()
        except Empty:
            try:
                self.pool.wait_for_tasks(timeout=0.01)
            except RuntimeError:
                pass  # Tasks still remaining
            except Failure as err:
                error_dialog(
                    self.pd,
                    _('Cannot add books'),
                    _('Failed to add some books, click "Show details" for more information.'
                      ),
                    det_msg=unicode(err.failure_message) + '\n' +
                    unicode(err.details),
                    show=True)
                self.pd.canceled = True
            else:
                # All tasks completed
                try:
                    join_with_timeout(self.pool.results, 0.01)
                except RuntimeError:
                    pass  # There are results remaining
                else:
                    # No results left
                    self.process_duplicates()
                    return
        else:
            group_id = worker_result.id
            if worker_result.is_terminal_failure:
                error_dialog(
                    self.pd,
                    _('Critical failure'),
                    _('The read metadata worker process crashed while processing'
                      ' some files. Adding of books is aborted. Click "Show details"'
                      ' to see which files caused the problem.'),
                    show=True,
                    det_msg='\n'.join(self.file_groups[group_id]))
                self.pd.canceled = True
            else:
                try:
                    self.process_result(group_id, worker_result.result)
                except Exception:
                    self.report_metadata_failure(group_id,
                                                 traceback.format_exc())
                self.pd.value += 1

        self.do_one_signal.emit()

    def report_metadata_failure(self, group_id, details):
        a = self.report.append
        paths = self.file_groups[group_id]
        a(''), a('-' * 70)
        m = ngettext('Failed to read metadata from the file:',
                     'Failed to read metadata from the files:', len(paths))
        a(m)
        [a('\t' + f) for f in paths]
        a(_('With error:')), a(details)
        mi = Metadata(_('Unknown'))
        mi.read_metadata_failed = False
        return mi

    def process_result(self, group_id, result):
        if result.err:
            mi = self.report_metadata_failure(group_id, result.traceback)
            paths = self.file_groups[group_id]
            has_cover = False
            duplicate_info = set() if self.add_formats_to_existing else False
        else:
            paths, opf, has_cover, duplicate_info = result.value
            try:
                mi = OPF(BytesIO(opf),
                         basedir=self.tdir,
                         populate_spine=False,
                         try_to_guess_cover=False).to_book_metadata()
                mi.read_metadata_failed = False
            except Exception:
                mi = self.report_metadata_failure(group_id,
                                                  traceback.format_exc())

        if mi.is_null('title'):
            for path in paths:
                mi.title = os.path.splitext(os.path.basename(path))[0]
                break
        if mi.application_id == '__calibre_dummy__':
            mi.application_id = None
        if gprefs.get('tag_map_on_add_rules'):
            from calibre.ebooks.metadata.tag_mapper import map_tags
            mi.tags = map_tags(mi.tags, gprefs['tag_map_on_add_rules'])

        self.pd.msg = mi.title

        cover_path = os.path.join(self.tdir, '%s.cdata' %
                                  group_id) if has_cover else None

        if self.db is None:
            if paths:
                self.items.append((mi, cover_path, paths))
            return

        if self.add_formats_to_existing:
            identical_book_ids = find_identical_books(
                mi, self.find_identical_books_data)
            if identical_book_ids:
                try:
                    self.merge_books(mi, cover_path, paths, identical_book_ids)
                except Exception:
                    a = self.report.append
                    a(''), a('-' * 70)
                    a(_('Failed to merge the book: ') + mi.title)
                    [a('\t' + f) for f in paths]
                    a(_('With error:')), a(traceback.format_exc())
            else:
                self.add_book(mi, cover_path, paths)
        else:
            if duplicate_info or icu_lower(
                    mi.title or _('Unknown')) in self.added_duplicate_info:
                self.duplicates.append((mi, cover_path, paths))
            else:
                self.add_book(mi, cover_path, paths)

    def merge_books(self, mi, cover_path, paths, identical_book_ids):
        self.merged_books.add((mi.title, ' & '.join(mi.authors)))
        seen_fmts = set()
        replace = gprefs['automerge'] == 'overwrite'
        cover_removed = False
        for identical_book_id in identical_book_ids:
            ib_fmts = {
                fmt.upper()
                for fmt in self.db.formats(identical_book_id)
            }
            seen_fmts |= ib_fmts
            self.add_formats(identical_book_id, paths, mi, replace=replace)
        if gprefs['automerge'] == 'new record':
            incoming_fmts = {
                path.rpartition(os.extsep)[-1].upper()
                for path in paths
            }
            if incoming_fmts.intersection(seen_fmts):
                # There was at least one duplicate format, so create a new
                # record and put the incoming formats into it. We should
                # arguably put only the duplicate formats, but no real harm is
                # done by having all formats.
                self.add_book(mi, cover_path, paths)
                cover_removed = True
        if not cover_removed and cover_path:
            try:
                os.remove(cover_path)
            except Exception:
                pass

    def add_book(self, mi, cover_path, paths):
        if DEBUG:
            st = time.time()
        try:
            cdata = None
            if cover_path:
                with open(cover_path, 'rb') as f:
                    cdata = f.read()
                try:
                    os.remove(cover_path)
                except Exception:
                    pass
            book_id = self.dbref().create_book_entry(mi, cover=cdata)
            self.added_book_ids.add(book_id)
        except Exception:
            a = self.report.append
            a(''), a('-' * 70)
            a(_('Failed to add the book: ') + mi.title)
            [a('\t' + f) for f in paths]
            a(_('With error:')), a(traceback.format_exc())
            return
        self.add_formats(book_id, paths, mi, is_an_add=True)
        try:
            if self.add_formats_to_existing:
                self.db.update_data_for_find_identical_books(
                    book_id, self.find_identical_books_data)
            else:
                self.added_duplicate_info.add(
                    icu_lower(mi.title or _('Unknown')))
        except Exception:
            # Ignore this exception since all it means is that duplicate
            # detection/automerge will fail for this book.
            traceback.print_exc()
        if DEBUG:
            prints('Added', mi.title, 'to db in: %.1f' % (time.time() - st))

    def add_formats(self, book_id, paths, mi, replace=True, is_an_add=False):
        fmap = {p.rpartition(os.path.extsep)[-1].lower(): p for p in paths}
        fmt_map = {}
        for fmt, path in fmap.iteritems():
            # The onimport plugins have already been run by the read metadata
            # worker
            if self.ignore_opf and fmt.lower() == 'opf':
                continue
            try:
                if self.db.add_format(book_id,
                                      fmt,
                                      path,
                                      run_hooks=False,
                                      replace=replace):
                    run_plugins_on_postimport(self.dbref(), book_id, fmt)
                    fmt_map[fmt.lower()] = path
            except Exception:
                a = self.report.append
                a(''), a('-' * 70)
                a(
                    _('Failed to add the file {0} to the book: {1}').format(
                        path, mi.title))
                a(_('With error:')), a(traceback.format_exc())
        if is_an_add:
            run_plugins_on_postadd(self.dbref(), book_id, fmt_map)

    def process_duplicates(self):
        if self.duplicates:
            d = DuplicatesQuestion(self.dbref(), self.duplicates, self.pd)
            duplicates = tuple(d.duplicates)
            d.deleteLater()
            if duplicates:
                self.do_one = self.process_duplicate
                self.duplicates_to_process = iter(duplicates)
                self.pd.title = _('Adding duplicates')
                self.pd.msg = ''
                self.pd.max, self.pd.value = len(duplicates), 0
                self.do_one_signal.emit()
                return
        self.finish()

    def process_duplicate(self):
        try:
            mi, cover_path, paths = next(self.duplicates_to_process)
        except StopIteration:
            self.finish()
            return
        self.pd.value += 1
        self.pd.msg = mi.title
        self.add_book(mi, cover_path, paths)
        self.do_one_signal.emit()

    def finish(self):
        if DEBUG:
            prints('Added %s books in %.1f seconds' %
                   (len(self.added_book_ids
                        or self.items), time.time() - self.start_time))
        if self.report:
            added_some = self.items or self.added_book_ids
            d = warning_dialog if added_some else error_dialog
            msg = _(
                'There were problems adding some files, click "Show details" for more information'
            ) if added_some else _(
                'Failed to add any books, click "Show details" for more information'
            )
            d(self.pd,
              _('Errors while adding'),
              msg,
              det_msg='\n'.join(self.report),
              show=True)

        if gprefs[
                'manual_add_auto_convert'] and self.added_book_ids and self.parent(
                ) is not None:
            self.parent().iactions['Convert Books'].auto_convert_auto_add(
                self.added_book_ids)

        try:
            if callable(self.callback):
                self.callback(self)
        finally:
            self.break_cycles()

    @property
    def number_of_books_added(self):
        return len(self.added_book_ids)
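
The result-handling loop above polls the worker pool with a very short timeout and handles at most one result per tick, which keeps a GUI event loop responsive. A minimal sketch of the same pattern, assuming a plain queue.Queue in place of calibre's pool (the names below are illustrative, not calibre's API):

import queue
from threading import Thread

results = queue.Queue()

def worker(n):
    # Stand-in for the read-metadata workers: post one result per group.
    for i in range(n):
        results.put(('group-%d' % i, 'metadata'))

t = Thread(target=worker, args=(5,), daemon=True)
t.start()

while t.is_alive() or not results.empty():
    try:
        group_id, result = results.get(timeout=0.01)
    except queue.Empty:
        continue  # nothing ready yet; same role as the RuntimeError branch above
    print('processing', group_id, result)
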
Example #34
0
     print(BRIGHTRED + "Error: Not enough free space on disk for merged plot file!" + RESET_ALL)
     sys.exit(1)
 print(BRIGHTGREEN + f"Writing merged plot to {outPathName}.merging..." + RESET_ALL)
 if bDryRun:
     print(BRIGHTBLUE + "Skip merging plot files because of dry run option." + RESET_ALL)
     sys.exit(0)
 bStop = False
 buf = deque()
 sem = Semaphore(1000)
 lock = _thread.allocate_lock()
 thrReader = Thread(target = readerThread, args = ( buf, sem, lock ), daemon = True)
 thrReader.start()
 cnt = written = lastWritten = 0
 t0 = t1 = time.time()
 with open(outPathName + ".merging", "wb") as O:
     while thrReader.is_alive() or buf:
         try:
             data = buf.popleft()
             if data is None:
                 break
             O.write(data)
             sem.release()
             cnt += 1
             written += len(data)
             if cnt >= 1000:
                 t2 = time.time()
                 print("%.1f%% written. %d MB/s. " % (100 * written / outSize, (written - lastWritten) // MB / (t2 - t1)),
                       end = "\r")
                 cnt = 0
                 lastWritten = written
                 t1 = t2
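
readerThread itself is not shown in the snippet above; the sketch below is one plausible shape for it, with the Semaphore bounding how far the reader may run ahead of the writer and a None sentinel ending the stream (the chunk data is made up for illustration):

import time
from collections import deque
from threading import Semaphore, Thread
import _thread

def readerThread(buf, sem, lock):
    for chunk in (b'x' * 4096, b'y' * 4096, b'z' * 4096):
        sem.acquire()              # blocks once 1000 chunks are buffered
        with lock:
            buf.append(chunk)
    buf.append(None)               # sentinel: no more data

buf, sem, lock = deque(), Semaphore(1000), _thread.allocate_lock()
t = Thread(target=readerThread, args=(buf, sem, lock), daemon=True)
t.start()
while t.is_alive() or buf:
    if not buf:
        time.sleep(0.001)          # reader has not produced anything yet
        continue
    data = buf.popleft()
    if data is None:
        break
    sem.release()                  # free a slot for the reader
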
Example #35
0
 def is_alive(self):
     try:
         return Thread.is_alive(self)
     except AttributeError:
         return Thread.isAlive(self)
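
A minimal sketch of how such a shim is used: subclass Thread so callers can rely on is_alive() both on very old interpreters (which only provide isAlive()) and on modern Python 3 (where isAlive() was removed in 3.9):

import time
from threading import Thread

class CompatThread(Thread):
    def is_alive(self):
        try:
            return Thread.is_alive(self)
        except AttributeError:
            # Very old interpreters only provide the camelCase alias.
            return Thread.isAlive(self)

t = CompatThread(target=time.sleep, args=(0.1,))
t.start()
print(t.is_alive())   # True while sleep() is still running
t.join()
print(t.is_alive())   # False once the target has returned
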
Example #36
0
class ARFTestRunner(object):
    CURRENT_DIR = op.dirname(op.abspath(__file__))
    TEMP_CODE_TEST_DIR = op.join(CURRENT_DIR, "code_test")
    DECLARE_FILE = "test_import_declare.py.template"

    def __init__(self, envs, logger, all_template_setting=None, **kwargs):
        '''
        all_template_setting = {
            "test_template_setting" :
                {
                "template_dir": ,
                "template_declare_file": ,
                },
            "input_template_setting" :
                {
                "template_dir":,
                "template_input_file":,
                "template_lookup_dir":,
                }
        }
        '''
        self._envs = envs
        self._logger = logger
        self._input_sim_obj = None
        self._build_setting = self._envs.get("build_setting")
        self._ta_name = self._build_setting.get(ac.SHORT_NAME)
        self._lib_dir = common_util.get_python_lib_dir_name(self._ta_name)
        self._test_setting = self._envs["test_setting"]
        self._test_global_settings = self._envs.get("global_settings")
        self._package_dir = self._test_setting.get("ta_root_dir")
        self._local_ta_dir = self._test_setting.get("local_ta_dir")
        self._code_test_dir = op.join(
            self._test_setting.get("test_container_dir"), "code_test")
        self._stdout_file = self._test_setting.get("stdout_file")
        self._stderr_file = self._test_setting.get("stderr_file")
        self._timeout = self._test_setting.get("timeout") or DEFAULT_TIMEOUT
        self._all_template_setting = all_template_setting
        self.init_template_setting()
        self._temp_mgr = AlertActionsTemplateMgr(
            template_dir=self._template_dir)
        self._child_proc = None
        self.init_test_env()
        self._subprocess_out = {"stderr": "", "stdout": ""}

    def init_code_file(self):
        self._test_code_file = self._test_setting.get("code_file")
        if self._test_code_file:
            copy(self._test_code_file, self._code_test_dir)
            return

        orig_main_code_file = op.join(self._package_dir, "bin",
                                      self._test_setting.get("name") + ".py")
        self._test_code_file = op.join(self._code_test_dir,
                                       self._test_setting.get("name") + ".py")

        if not op.exists(op.dirname(self._test_code_file)):
            os.makedirs(op.dirname(self._test_code_file))
        # if op.exists(self._test_code_file):
        #     os.remove(self._test_code_file)
        copy(orig_main_code_file, self._test_code_file)

        self._test_code = self._test_setting.get("code")
        if self._test_code:
            file_path = op.join(self._package_dir, "bin", self._lib_dir,
                                self._test_setting.get("name") + "_helper.py")
            if not op.exists(op.dirname(file_path)):
                os.makedirs(op.dirname(file_path))
            with open(file_path, 'w+') as fp:
                fp.write(self._test_code)
            return

        raise aae.AlertTestCodeFileNotExistFailure('test_setting={}'.format(
            self._test_setting))

    def init_test_env(self):
        self._alert_name = self._test_setting.get("name")
        self._temp_code_test_dir = self._test_setting.get("temp_code_test_dir") \
            or ARFTestRunner.TEMP_CODE_TEST_DIR
        if op.exists(self._code_test_dir):
            rmtree(self._code_test_dir)
        copytree(self._temp_code_test_dir, self._code_test_dir)

        self.init_code_file()
        return

    def init_template_setting(self):
        self._template_setting = None
        self._template_dir = None
        self._template_declare_file = ARFTestRunner.DECLARE_FILE
        self._input_template_setting = None
        if not self._all_template_setting:
            return

        self._template_setting = self._all_template_setting.get(
            "test_template_setting")
        self._input_template_setting = self._all_template_setting.get(
            "input_template_setting")
        if not self._template_setting:
            return

        self._template_dir = self._template_setting.get("template_dir")
        if self._template_setting.get("template_declare_file"):
            self._template_declare_file = self._template_setting[
                "template_declare_file"]

    def get_new_declare_file_path(self):
        return op.join(self._code_test_dir, self._lib_dir + "_declare.py")

    def prepare_declare_file(self):
        # Generate a new declare file, which will add the code test directory
        # as the first element of sys.path. The subprocess will then import
        # the overwritten logger_helper, which has an additional stderr
        # StreamHandler so that all output, including the logger's, is
        # redirected to stderr
        template_declare = self._template_declare_file
        if not op.isabs(self._template_declare_file):
            template_declare = op.join(self._temp_mgr.get_template_dir(),
                                       self._template_declare_file)

        try:
            self._template = Template(filename=template_declare)
            final_content = self._template.render(
                package_root_path=aah.split_path(self._package_dir),
                local_ta_dir=aah.split_path(self._local_ta_dir))
        except Exception:
            msg = 'operation="render declare file", file="{}", '\
                'status="failed", reason="{}"'.format(template_declare,
                                                      format_exc())
            raise aae.AlertTestWritingFileFailure(msg)

        file_path = self.get_new_declare_file_path()

        try:
            with open(file_path, 'w+') as fp:
                fp.write(final_content)
        except IOError:
            msg = 'operation="write", object="{}", '\
                'status="failed", reason="{}"'.format(template_declare,
                                                      format_exc())
            raise aae.AlertTestWritingFileFailure(msg)

    def prepare(self):
        self.prepare_declare_file()

        # copy the main py file to relocate directory
        self._py_file = op.join(self._code_test_dir,
                                op.basename(self._test_code_file))

        # simulate alert input
        input_setting = self._test_setting.get("input_setting")
        self._input_sim_obj = simulate_alert_input(
            alert_input_setting=input_setting,
            logger=self._logger,
            results_dir=self._code_test_dir,
            template_setting=self._input_template_setting)
        self._inputs = self._input_sim_obj.simulate()

    def read_stdpipe_and_write(self, from_pipe, to_file, backup_pipe,
                               out_type):
        if to_file:
            try:
                with open(to_file, 'w+') as sf:
                    while True:
                        line = from_pipe.readline()
                        if not line:
                            break
                        line = line.decode()  # pipe yields bytes; file opened in text mode
                        sf.write(line)
                        self._subprocess_out[out_type] += line
            except IOError:
                msg = 'operation="write", content="{}", to="{}" '\
                    'status="failed", reason="{}"'.format(line,
                                                          to_file,
                                                          format_exc())
                raise aae.AlertTestResultWritingFailure(msg)
        else:
            while True:
                line = from_pipe.readline()
                if not line:
                    break
                # if backup_pipe:
                #     backup_pipe.write(line)
                self._subprocess_out[out_type] += line.decode()

    def write_test_result(self, output=None, error=None):
        try:
            with open(self._stdout_file, 'w+') as sf:
                sf.write(output)
        except IOError:
            msg = 'operation="write", content="{}", to="{}", '\
                'status="failed", reason="{}"'.format(output,
                                                      self._stdout_file,
                                                      format_exc())
            raise aae.AlertTestWritingFileFailure(msg)

        try:
            with open(self._stderr_file, 'w+') as sf:
                sf.write(error)
        except IOError:
            msg = 'operation="write", content="{}", to="{}" '\
                'status="failed", reason="{}"'.format(error,
                                                      self._stderr_file,
                                                      format_exc())
            raise aae.AlertTestResultWritingFailure(msg)

    def kill_subprocess(self):
        if not self._child_proc:
            return
        try:
            self._child_proc.kill()
        except OSError as oe:
            if oe.errno == errno.ESRCH:
                self._logger.info(
                    'operation="kill", object="process", pid="%s"'
                    ', status="success"', self._child_proc.pid)
            else:
                msg = 'operation="kill", object="process", pid="%s", status="failed"'.format(
                    self._child_proc.pid)
                raise aae.AlertTestKillingSubprocessFailure(msg)
        else:
            self._logger.info('operation="kill", object="process", pid="%s", '\
                              'status="success"', self._child_proc.pid)

    def _get_splunk_bin(self):
        if os.name == 'nt':
            splunk_bin = 'splunk.exe'
        else:
            splunk_bin = 'splunk'
        return make_splunk_path(('bin', splunk_bin))

    def start_subprocess(self):
        self.prepare()

        # use python3 for test by default
        if os.path.isfile(make_splunk_path(('bin', "python3"))) \
                or os.path.isfile(make_splunk_path(('bin', "python3.exe"))):
            cmd = [
                self._get_splunk_bin(), 'cmd', 'python3', self._py_file,
                "--execute"
            ]
        else:
            cmd = [
                self._get_splunk_bin(), 'cmd', 'python', self._py_file,
                "--execute"
            ]
        # cmd = ["python", self._py_file, "--execute"]
        for one_input in self._inputs:
            try:
                child_env = os.environ.copy()
                child_env[AOB_TEST_FLAG] = 'true'
                child_env[GLOBALSETTINGS] = json.dumps(
                    self._test_global_settings.get('settings', {}))
                self._child_proc = subprocess.Popen(cmd,
                                                    stdin=subprocess.PIPE,
                                                    stderr=subprocess.PIPE,
                                                    stdout=subprocess.PIPE,
                                                    cwd=self._code_test_dir,
                                                    env=child_env)
                self._logger.info('operation="start subprocess", pid="%s", '\
                                  'status="success", input="%s"',
                                  self._child_proc.pid, one_input)
            except subprocess.CalledProcessError as e:
                self._logger.info('operation="start subprocess", pid="%s", '\
                                  'status="failed", input="%s", reason="%s"',
                                  self._child_proc.pid, one_input, e.output)

            self._stderr_thd = Thread(target=self.read_stdpipe_and_write,
                                      args=(self._child_proc.stderr,
                                            self._stderr_file, sys.stderr,
                                            "stderr"))
            self._stdout_thd = Thread(target=self.read_stdpipe_and_write,
                                      args=(self._child_proc.stdout,
                                            self._stdout_file, sys.stdout,
                                            "stdout"))
            self._stderr_thd.daemon = True
            self._stdout_thd.daemon = True
            self._child_proc.stdin.write(json.dumps(one_input).encode())
            self._child_proc.stdin.close()
            self._stderr_thd.start()
            self._stdout_thd.start()

            self._stdout_thd.join(self._timeout)
            self._stderr_thd.join(self._timeout)
            if self._stderr_thd.is_alive() or self._stdout_thd.is_alive():
                msg = 'pid="{}" alert="{}" timeout={}'.format(
                    self._child_proc.pid, self._alert_name, self._timeout)
                self.kill_subprocess()
                raise aae.AlertTestSubprocessTimeoutFailure(msg)

            self._child_proc.wait()
            self._subprocess_out["exit_code"] = self._child_proc.returncode
            '''
            output, error = self._child_proc.communicate(
                                           input=json.dumps(one_input)+"\n")
            self.write_test_result(output, error)
            '''

    def clean_up(self):
        if self._input_sim_obj:
            self._input_sim_obj.clean_up()

        # if op.exists(self._code_test_dir):
        #     rmtree(self._code_test_dir)

    def run(self):
        try:
            self.start_subprocess()
        finally:
            self.clean_up()

        return self._subprocess_out
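
The heart of start_subprocess() is a hang-proof pattern worth isolating: drain each of the child's pipes on its own daemon thread, join with a timeout, and treat a still-alive reader as a hung child. A condensed, self-contained sketch (the command and the 5-second timeout are placeholders):

import subprocess
import sys
from threading import Thread

def drain(pipe, sink):
    # Read the pipe to exhaustion so the child can never block on a
    # full pipe buffer.
    for line in iter(pipe.readline, b''):
        sink.append(line.decode())

proc = subprocess.Popen([sys.executable, '-c', 'print("hello")'],
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = [], []
readers = [Thread(target=drain, args=(proc.stdout, out), daemon=True),
           Thread(target=drain, args=(proc.stderr, err), daemon=True)]
for t in readers:
    t.start()
for t in readers:
    t.join(timeout=5)
if any(t.is_alive() for t in readers):
    proc.kill()               # a reader still alive means the child hung
exit_code = proc.wait()
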
Example #37
0
class SoundManager:
    """
    Object which configures the sound management on the robot
    """
    def __init__(self):
        # - Init
        self.__sound_player = SoundPlayer()
        self.__sound_database = SoundDatabase()
        self.__text_to_speech = NiryoTextToSpeech(self, self.__sound_database)

        self.__sound_thread = Thread()
        self.sound_end_event = Event()
        self.sound_end_event.clear()

        self.play_sound(self.__sound_database.wake_up_sound)

        self.__rpi_overheating = False
        self.__overheat_timer = None
        self.__error_sound_delay = rospy.get_param("~error_sound_delay")

        # - Subscribers
        self.__robot_status = RobotStatus.BOOTING
        self.__logs_status = RobotStatus.NONE
        rospy.Subscriber('/niryo_robot_status/robot_status', RobotStatus,
                         self.__callback_sub_robot_status)

        rospy.Subscriber('/niryo_studio_connection', Empty,
                         self.__callback_niryo_studio)

        # - Services
        rospy.Service('/niryo_robot_sound/play', PlaySound,
                      self.__callback_play_sound_user)

        # Set a bool to mention this node is initialized
        rospy.set_param('~initialized', True)
        rospy.loginfo("Sound Interface - Started")

    # - Callbacks
    def __callback_sub_robot_status(self, msg):
        if self.__robot_status == RobotStatus.SHUTDOWN:
            return
        elif msg.robot_status == RobotStatus.SHUTDOWN:
            self.__robot_status = msg.robot_status
            rospy.sleep(1.5)  # avoid ctrl+c
            self.play_shutdown_sound()
            self.sound_end_event.set()
            return

        if msg.rpi_overheating != self.__rpi_overheating:
            self.__rpi_overheating = msg.rpi_overheating
            if self.__rpi_overheating:
                sound = self.__sound_database.error_sound
                self.play_sound(sound)
                self.__overheat_timer = rospy.Timer(
                    rospy.Duration(self.__error_sound_delay),
                    self.__error_sound_callback)
            elif self.__overheat_timer is not None:
                self.__overheat_timer.shutdown()
                self.__overheat_timer = None

        if self.__robot_status != msg.robot_status:
            last_status = self.__robot_status
            self.__robot_status = msg.robot_status

            if last_status in [RobotStatus.RUNNING_AUTONOMOUS, RobotStatus.LEARNING_MODE_AUTONOMOUS] \
                    and self.__robot_status not in [RobotStatus.RUNNING_AUTONOMOUS,
                                                    RobotStatus.LEARNING_MODE_AUTONOMOUS]:
                self.__sound_player.stop()

            if last_status == RobotStatus.BOOTING and self.__robot_status != RobotStatus.BOOTING:
                self.__sound_player.stop_w_fade_out()
                self.play_sound(self.__sound_database.robot_ready_sound)
            elif self.__robot_status in [
                    RobotStatus.FATAL_ERROR, RobotStatus.MOTOR_ERROR
            ]:
                self.play_sound(self.__sound_database.error_sound)
            elif last_status != RobotStatus.CALIBRATION_IN_PROGRESS and \
                    msg.robot_status == RobotStatus.CALIBRATION_IN_PROGRESS:
                self.play_sound(self.__sound_database.calibration_sound)

        if self.__logs_status != msg.logs_status:
            self.__logs_status = msg.logs_status
            if self.__logs_status in [
                    RobotStatus.ERROR, RobotStatus.FATAL_ERROR
            ]:
                self.play_sound(self.__sound_database.error_sound)

    def __callback_play_sound_user(self, msg):
        sound_name = msg.sound_name
        return self.play_user_sound(sound_name, msg.start_time_sec,
                                    msg.end_time_sec, msg.wait_end)

    def play_user_sound(self,
                        sound_name,
                        start_time_sec=0,
                        end_time_sec=0,
                        wait_end=True):
        sound = self.__sound_database(sound_name)
        if sound is None:
            return CommandStatus.SOUND_FILE_NOT_FOUND, "{} sound not found".format(
                sound_name)

        self.play_sound(sound, start_time_sec, end_time_sec, wait=wait_end)

        if sound.preempted:
            return CommandStatus.SUCCESS, "{} sound preempted".format(
                sound_name)
        return CommandStatus.SUCCESS, "{} sound played with success".format(
            sound_name)

    def __error_sound_callback(self, _):
        if self.__rpi_overheating:
            sound = self.__sound_database.error_sound
            self.play_sound(sound)
        elif self.__overheat_timer is not None:
            self.__overheat_timer.shutdown()
            self.__overheat_timer = None

    def __callback_niryo_studio(self, _):
        if not self.__sound_player.is_busy():
            sound = self.__sound_database.connection_sound
            self.play_sound(sound)

    def play_sound(self, sound, start_time=0, end_time=0, wait=False):
        if self.__sound_thread.is_alive():
            self.__sound_player.stop()
            self.__sound_thread.join()

        self.__sound_thread = Thread(target=self.__sound_player.play_sound,
                                     args=(sound, start_time, end_time))
        self.__sound_thread.start()

        if wait:
            while not rospy.is_shutdown() and self.__sound_thread.is_alive():
                self.__sound_thread.join(timeout=0.1)

    def play_shutdown_sound(self):
        rospy.loginfo("Play shutdown sound")

        if self.__overheat_timer is not None:
            self.__overheat_timer.shutdown()
            self.__overheat_timer = None

        if self.__sound_thread.is_alive():
            self.__sound_player.stop()
            self.__sound_thread.join()

        sound = self.__sound_database.sleep_sound
        self.__sound_player.play_sound(sound)
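
play_sound() above enforces a single active playback thread: stop and join any previous sound, start a fresh thread, and, when asked to wait, block in short join(timeout) steps so the caller can still observe shutdown. A minimal sketch of that discipline, with an Event standing in for the sound player's stop mechanism (hypothetical, not the Niryo API):

from threading import Thread, Event

class OneAtATimePlayer:
    def __init__(self):
        self._stop = Event()
        self._thread = Thread()          # placeholder; never started

    def _play(self, duration):
        self._stop.wait(duration)        # "playback" that can be preempted

    def play(self, duration, wait=False):
        if self._thread.is_alive():      # preempt the previous sound
            self._stop.set()
            self._thread.join()
        self._stop.clear()
        self._thread = Thread(target=self._play, args=(duration,))
        self._thread.start()
        while wait and self._thread.is_alive():
            self._thread.join(timeout=0.1)

p = OneAtATimePlayer()
p.play(0.5)
p.play(0.2, wait=True)   # preempts the first sound, then blocks until done
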
Example #38
0
def main():
    args = get_args()

    # Check for deprecated arguments
    if args.debug:
        log.warning(
            '--debug is deprecated. Please use --verbose instead. Enabling --verbose'
        )
        args.verbose = 'nofile'

    # Add file logging if enabled
    if args.verbose and args.verbose != 'nofile':
        filelog = logging.FileHandler(args.verbose)
        filelog.setFormatter(
            logging.Formatter(
                '%(asctime)s [%(threadName)16s][%(module)14s][%(levelname)8s] %(message)s'
            ))
        logging.getLogger('').addHandler(filelog)
    if args.very_verbose and args.very_verbose != 'nofile':
        filelog = logging.FileHandler(args.very_verbose)
        filelog.setFormatter(
            logging.Formatter(
                '%(asctime)s [%(threadName)16s][%(module)14s][%(levelname)8s] %(message)s'
            ))
        logging.getLogger('').addHandler(filelog)

    # Check if we have the proper encryption library file and get its path
    encryption_lib_path = get_encryption_lib_path(args)
    if encryption_lib_path == "":
        sys.exit(1)

    if args.verbose or args.very_verbose:
        log.setLevel(logging.DEBUG)
    else:
        log.setLevel(logging.INFO)

    # Let's not forget to run Grunt / Only needed when running with webserver
    if not args.no_server:
        if not os.path.exists(
                os.path.join(os.path.dirname(__file__), 'static/dist')):
            log.critical(
                'Missing front-end assets (static/dist) -- please run "npm install && npm run build" before starting the server'
            )
            sys.exit()

    # These are very noisy, let's shush them up a bit
    logging.getLogger('peewee').setLevel(logging.INFO)
    logging.getLogger('requests').setLevel(logging.WARNING)
    logging.getLogger('pgoapi.pgoapi').setLevel(logging.WARNING)
    logging.getLogger('pgoapi.rpc_api').setLevel(logging.INFO)
    logging.getLogger('werkzeug').setLevel(logging.ERROR)

    config['parse_pokemon'] = not args.no_pokemon
    config['parse_pokestops'] = not args.no_pokestops
    config['parse_gyms'] = not args.no_gyms

    # Turn these back up if debugging
    if args.verbose or args.very_verbose:
        logging.getLogger('pgoapi').setLevel(logging.DEBUG)
    if args.very_verbose:
        logging.getLogger('peewee').setLevel(logging.DEBUG)
        logging.getLogger('requests').setLevel(logging.DEBUG)
        logging.getLogger('pgoapi.pgoapi').setLevel(logging.DEBUG)
        logging.getLogger('pgoapi.rpc_api').setLevel(logging.DEBUG)
        logging.getLogger('rpc_api').setLevel(logging.DEBUG)
        logging.getLogger('werkzeug').setLevel(logging.DEBUG)

    # use lat/lng directly if the location matches such a pattern
    prog = re.compile(r"^(\-?\d+\.\d+),?\s?(\-?\d+\.\d+)$")
    res = prog.match(args.location)
    if res:
        log.debug('Using coordinates from CLI directly')
        position = (float(res.group(1)), float(res.group(2)), 0)
    else:
        log.debug('Looking up coordinates in API')
        position = util.get_pos_by_name(args.location)

    # Use the latitude and longitude to get the local altitude from Google
    try:
        url = 'https://maps.googleapis.com/maps/api/elevation/json?locations={},{}'.format(
            str(position[0]), str(position[1]))
        altitude = requests.get(url).json()[u'results'][0][u'elevation']
        log.debug('Local altitude is: %sm', altitude)
        position = (position[0], position[1], altitude)
    except (requests.exceptions.RequestException, IndexError, KeyError):
        log.error('Unable to retrieve altitude from Google APIs; setting to 0')

    if not any(position):
        log.error('Could not get a position by name, aborting')
        sys.exit()

    log.info('Parsed location is: %.4f/%.4f/%.4f (lat/lng/alt)', position[0],
             position[1], position[2])

    if args.no_pokemon:
        log.info('Parsing of Pokemon disabled')
    if args.no_pokestops:
        log.info('Parsing of Pokestops disabled')
    if args.no_gyms:
        log.info('Parsing of Gyms disabled')

    config['LOCALE'] = args.locale
    config['CHINA'] = args.china

    app = Pogom(__name__)
    db = init_database(app)
    if args.clear_db:
        log.info('Clearing database')
        if args.db_type == 'mysql':
            drop_tables(db)
        elif os.path.isfile(args.db):
            os.remove(args.db)
    create_tables(db)

    app.set_current_location(position)

    # Control the search status (running or not) across threads
    pause_bit = Event()
    pause_bit.clear()

    # Setup the location tracking queue and push the first location on
    new_location_queue = Queue()
    new_location_queue.put(position)

    # DB Updates
    db_updates_queue = Queue()

    # Thread(s) to process database updates
    for i in range(args.db_threads):
        log.debug('Starting db-updater worker thread %d', i)
        t = Thread(target=db_updater,
                   name='db-updater-{}'.format(i),
                   args=(args, db_updates_queue))
        t.daemon = True
        t.start()

    # db cleaner; we really only need one ever
    t = Thread(target=clean_db_loop, name='db-cleaner', args=(args, ))
    t.daemon = True
    t.start()

    # WH Updates
    wh_updates_queue = Queue()

    # Thread(s) to process webhook updates
    for i in range(args.wh_threads):
        log.debug('Starting wh-updater worker thread %d', i)
        t = Thread(target=wh_updater,
                   name='wh-updater-{}'.format(i),
                   args=(args, wh_updates_queue))
        t.daemon = True
        t.start()

    if not args.only_server:

        # Check all proxies before continuing so we know they are good
        if args.proxy:

            # Overwrite old args.proxy with new working list
            args.proxy = check_proxies(args)

        # Gather the pokemons!

        # check the sort of scan
        if args.spawnpoint_scanning:
            mode = 'sps'
        else:
            mode = 'hex'

        # attempt to dump the spawn points (do this before starting threads or endure the woe)
        if args.spawnpoint_scanning and args.spawnpoint_scanning != 'nofile' and args.dump_spawnpoints:
            with open(args.spawnpoint_scanning, 'w+') as file:
                log.info('Saving spawn points to %s', args.spawnpoint_scanning)
                spawns = Pokemon.get_spawnpoints_in_hex(
                    position, args.step_limit)
                file.write(json.dumps(spawns))
                log.info('Finished exporting spawn points')

        argset = (args, mode, new_location_queue, pause_bit,
                  encryption_lib_path, db_updates_queue, wh_updates_queue)

        log.debug('Starting a %s search thread', mode)
        search_thread = Thread(target=search_overseer_thread,
                               name='search-overseer',
                               args=argset)
        search_thread.daemon = True
        search_thread.start()

    if args.cors:
        CORS(app)

    # No more stale JS
    init_cache_busting(app)

    app.set_search_control(pause_bit)
    app.set_location_queue(new_location_queue)

    config['ROOT_PATH'] = app.root_path
    config['GMAPS_KEY'] = args.gmaps_key

    if args.no_server:
        # This loop allows for Ctrl-C interrupts to work since gevent won't be holding the program open
        while search_thread.is_alive():
            time.sleep(60)
    else:
        # run gevent server
        gevent_log = None
        if args.verbose or args.very_verbose:
            gevent_log = log
        if args.ssl_certificate and args.ssl_privatekey \
                and os.path.exists(args.ssl_certificate) and os.path.exists(args.ssl_privatekey):
            http_server = pywsgi.WSGIServer((args.host, args.port),
                                            app,
                                            log=gevent_log,
                                            error_log=log,
                                            keyfile=args.ssl_privatekey,
                                            certfile=args.ssl_certificate,
                                            ssl_version=ssl.PROTOCOL_TLSv1_2)
            log.info('Web server in SSL mode, listening at https://%s:%d',
                     args.host, args.port)
        else:
            http_server = pywsgi.WSGIServer((args.host, args.port),
                                            app,
                                            log=gevent_log,
                                            error_log=log)
            log.info('Web server listening at http://%s:%d', args.host,
                     args.port)
        # run it
        try:
            http_server.serve_forever()
        except KeyboardInterrupt:
            pass
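
The --no-server branch relies on a small but important detail: the search thread is a daemon, so the main thread sleeps in a loop that both keeps the process open and stays receptive to Ctrl-C. In sketch form:

import time
from threading import Thread

def overseer():
    time.sleep(3)        # stand-in for search_overseer_thread

t = Thread(target=overseer, name='search-overseer', daemon=True)
t.start()
try:
    # The daemon worker would die with the process, so this loop is what
    # keeps the program alive; KeyboardInterrupt lands here immediately.
    while t.is_alive():
        time.sleep(0.5)
except KeyboardInterrupt:
    pass
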
Example #39
0
class StatsAndLogging:
    """A thread to aggregate statistics and logging."""
    def __init__(self, jobStore, config):
        self._stop = Event()
        self._worker = Thread(target=self.statsAndLoggingAggregator,
                              args=(jobStore, self._stop, config),
                              daemon=True)

    def start(self):
        """Start the stats and logging thread."""
        self._worker.start()

    @classmethod
    def formatLogStream(cls,
                        stream: Union[TextIO, BinaryIO],
                        job_name: Optional[str] = None):
        """
        Given a stream of text or bytes, and the job name, job itself, or some
        other optional stringifiable identity info for the job, return a big
        text string with the formatted job log, suitable for printing for the
        user.

        We don't want to prefix every line of the job's log with our own
        logging info, or we get prefixes wider than any reasonable terminal
        and longer than the messages.
        """
        lines = [f'Log from job "{job_name}" follows:', '=========>']

        for line in stream:
            if isinstance(line, bytes):
                line = line.decode('utf-8', errors='replace')
            lines.append('\t' + line.rstrip('\n'))

        lines.append('<=========')

        return '\n'.join(lines)

    @classmethod
    def logWithFormatting(cls,
                          jobStoreID,
                          jobLogs,
                          method=logger.debug,
                          message=None):
        if message is not None:
            method(message)

        # Format and log the logs, identifying the job with its job store ID.
        method(cls.formatLogStream(jobLogs, jobStoreID))

    @classmethod
    def writeLogFiles(cls, jobNames, jobLogList, config, failed=False):
        def createName(logPath, jobName, logExtension, failed=False):
            logName = jobName.replace('-', '--')
            logName = logName.replace('/', '-')
            logName = logName.replace(' ', '_')
            logName = logName.replace("'", '')
            logName = logName.replace('"', '')
            # Add a "failed_" prefix to logs from failed jobs.
            logName = ('failed_' if failed else '') + logName
            counter = 0
            while True:
                suffix = str(counter).zfill(3) + logExtension
                fullName = os.path.join(logPath, logName + suffix)
                #  The maximum file name size in the default HFS+ file system is 255 UTF-16 encoding units, so basically 255 characters
                if len(fullName) >= 255:
                    return fullName[:(255 - len(suffix))] + suffix
                if not os.path.exists(fullName):
                    return fullName
                counter += 1

        mainFileName = jobNames[0]
        extension = '.log'

        if config.writeLogs:
            path = config.writeLogs
            writeFn = open
        elif config.writeLogsGzip:
            path = config.writeLogsGzip
            writeFn = gzip.open
            extension += '.gz'
        else:
            # we don't have anywhere to write the logs, return now
            return

        fullName = createName(path, mainFileName, extension, failed)
        with writeFn(fullName, 'wb') as f:
            for l in jobLogList:
                if isinstance(l, bytes):
                    l = l.decode('utf-8')
                if not l.endswith('\n'):
                    l += '\n'
                f.write(l.encode('utf-8'))
        for alternateName in jobNames[1:]:
            # There are chained jobs in this output - indicate this with a symlink
            # of the job's name to this file
            name = createName(path, alternateName, extension, failed)
            if not os.path.exists(name):
                os.symlink(os.path.relpath(fullName, path), name)

    @classmethod
    def statsAndLoggingAggregator(cls, jobStore, stop, config):
        """
        Collate stats and report log messages from the workers. Runs inside
        a thread and keeps collating until the stop flag is set.
        """
        #  Overall timing
        startTime = time.time()
        startClock = get_total_cpu_time()

        def callback(fileHandle):
            statsStr = fileHandle.read()
            if not isinstance(statsStr, str):
                statsStr = statsStr.decode()
            stats = json.loads(statsStr, object_hook=Expando)
            try:
                logs = stats.workers.logsToMaster
            except AttributeError:
                # To be expected if there were no calls to logToMaster()
                pass
            else:
                for message in logs:
                    logger.log(int(message.level),
                               'Got message from job at time %s: %s',
                               time.strftime('%m-%d-%Y %H:%M:%S'),
                               message.text)
            try:
                logs = stats.logs
            except AttributeError:
                pass
            else:
                # we may have multiple jobs per worker
                jobNames = logs.names
                messages = logs.messages
                cls.logWithFormatting(
                    jobNames[0],
                    messages,
                    message=
                    'Received Toil worker log. Disable debug level logging to hide this output'
                )
                cls.writeLogFiles(jobNames, messages, config=config)

        while True:
            # This is an indirect way of getting a message to the thread to exit
            if stop.is_set():
                jobStore.readStatsAndLogging(callback)
                break
            if jobStore.readStatsAndLogging(callback) == 0:
                time.sleep(0.5)  # Avoid cycling too fast

        # Finish the stats file
        text = json.dumps(dict(total_time=str(time.time() - startTime),
                               total_clock=str(get_total_cpu_time() -
                                               startClock)),
                          ensure_ascii=True)
        jobStore.writeStatsAndLogging(bytes(text, 'utf-8'))

    def check(self):
        """
        Check on the stats and logging aggregator.
        :raise RuntimeError: If the underlying thread has quit.
        """
        if not self._worker.is_alive():
            raise RuntimeError("Stats and logging thread has quit")

    def shutdown(self):
        """Finish up the stats/logging aggregation thread."""
        logger.debug(
            'Waiting for stats and logging collator thread to finish ...')
        startTime = time.time()
        self._stop.set()
        self._worker.join()
        logger.debug('... finished collating stats and logs. Took %s seconds',
                     time.time() - startTime)
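
statsAndLoggingAggregator() shows the canonical Event-based shutdown with a final drain: the loop keeps collating, and once the stop flag is set it performs one last pass before exiting so nothing queued during shutdown is lost. A stripped-down sketch with a list standing in for the job store:

import time
from threading import Thread, Event

pending = ['stats-1', 'stats-2']

def drain():
    # Stand-in for jobStore.readStatsAndLogging(); returns how many
    # items were processed.
    n = len(pending)
    while pending:
        print('collated', pending.pop())
    return n

def aggregator(stop):
    while True:
        if stop.is_set():
            drain()          # final pass so nothing is lost
            break
        if drain() == 0:
            time.sleep(0.5)  # avoid spinning when there is no work

stop = Event()
worker = Thread(target=aggregator, args=(stop,), daemon=True)
worker.start()
time.sleep(0.1)
stop.set()           # request shutdown...
worker.join()        # ...and wait for the final drain to finish
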
Example #40
0
class PWM:
    def __init__(self, channel, frequency):
        _pwms.append(self)
        self._thread = None
        self._running = False
        self._duty_cycle = 0
        self._frequency = frequency

        if _io_mode == BOARD:
            channel = _board2bcm(channel)
        self._pin = _io_pins[channel]

    #end def

    def __del__(self):
        if self._thread is not None:
            self.stop()
        if _pwms is not None:
            for pwm in _pwms:
                if pwm is self:
                    _pwms.remove(self)
                    break

    #end def

    def _worker(self):
        self._running = True
        reset_time = 0
        flank_time = 0
        # run until stopped
        while self._running:
            now = int(time() * 1000)
            if now >= flank_time:
                self._pin.value = 0
            if now >= reset_time:
                self._pin.value = 1
                reset_time = int(now + 1000.0 / self._frequency)
                flank_time = int(now +
                                 self._duty_cycle * 10.0 / self._frequency)
            sleep(0.001)
        #end while

    #end def

    def start(self, dc):
        self.ChangeDutyCycle(dc)
        self._thread = Thread(target=self._worker)
        self._thread.start()

    #end def

    def stop(self):
        if self._thread is not None:
            self._running = False
            if self._thread.is_alive():
                self._thread.join()
        self._thread = None

    #end def

    def ChangeFrequency(self, freq):
        if not isinstance(freq, (int, float)):
            raise ArgumentException("Invalid type")
        self._frequency = freq

    #end def

    def ChangeDutyCycle(self, dc):
        if dc < 0 or dc > 100:
            raise ArgumentException("dc out of range")
        self._duty_cycle = dc
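
The start()/stop() pair above is the usual flag-and-join discipline for a worker thread. One detail worth doing differently: setting _running inside the worker (as _worker() does above) is racy, because a stop() issued before the worker's first statement runs would be overwritten. A minimal sketch that sets the flag in start() instead:

import time
from threading import Thread

class Blinker:
    def __init__(self):
        self._thread = None
        self._running = False

    def _worker(self):
        while self._running:
            time.sleep(0.01)     # stand-in for driving the pin

    def start(self):
        self._running = True     # set before the thread exists: no race
        self._thread = Thread(target=self._worker)
        self._thread.start()

    def stop(self):
        if self._thread is not None:
            self._running = False
            if self._thread.is_alive():
                self._thread.join()
        self._thread = None

b = Blinker()
b.start()
time.sleep(0.05)
b.stop()
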
Example #41
0
class VideoStream(object):
    '''
    Use two threads to read a video stream: a child thread decompresses
    the video in the background and puts the frames in a queue that is
    accessible for processing in the main program.
    '''
    
    def __init__(self, fname, beginning=None, end=None, quiet=True):
        '''
        Initialize the stream with the video file name and optional 
        beginning and end timestamps.
        '''
        
        # save the input file name
        self.fname = fname
        
        # initialize the camera
        self.camera = cv2.VideoCapture(self.fname)
        
        # store the fps value
        self.fps = self.camera.get(CAP_PROP_FPS)
        
        # store the length of the video (in frames)
        self.length = self.camera.get(CAP_PROP_FRAME_COUNT)
        
        # store video dimensions
        self.width = self.camera.get(3)    # CAP_PROP_FRAME_WIDTH
        self.height = self.camera.get(4)   # CAP_PROP_FRAME_HEIGHT
        
        # set the time of the first frame to be read
        self.set_beginning(beginning)
        
        # set the time of the last frame to be read
        self.set_end(end)
        
        # set the quiet parameter
        self.quiet = quiet

        # this is the number of frames that have been yielded by the read
        # method
        self.frame_number = 0
        
        # this is the cap position of the reader method
        self.cap_position = self.beginning.frame_number
        
        # the queue allows the reader method to store read frames
        self.Q = Queue(maxsize=128)
        
        # the stopped value is set when the stop method is called or
        # within the reader thread, when all the input frames have been
        # processed
        self.stopped = Event()
        
        # the released value is set when the camera has been released
        self.released = Event()
        
        # this is the reader thread; the daemon flag is left disabled below
        self.t = Thread(target=self.reader, args=())
        #self.t.daemon = True
    
    def __iter__(self):
        return self

    def next(self):
        frame = self.read()
        if frame is None:
            raise StopIteration
        return frame
        
    def set_beginning(self, timestamp=None):
        if timestamp is None: timestamp = "0:0:0"
        self.beginning = VideoTime(timestamp, self.fps)
        self.camera.set(CAP_PROP_POS_FRAMES, 
                        self.beginning.frame_number)
        
    def set_end(self, timestamp=None):
        if timestamp is None:
            self.end = frame_videotime(self.length, self.fps)
        else:
            self.end = VideoTime(timestamp, self.fps)
    
    def running(self):
        return not self.stopped.is_set()
        
    def start(self):
        self.t.start()
    
    def stop(self):
        self.stopped.set()
        self.t.join()
        
    def read(self):
        while not self.Q.qsize():
            if not self.t.is_alive(): 
                return
            if self.stopped.is_set(): 
                return
        self.frame_number += 1
        return self.Q.get()
        
    def before(self):
        return self.cap_position < self.beginning.frame_number
        
    def after(self):
        return ( self.end is not None and
                 self.cap_position >= self.end.frame_number )
    
    def in_range(self):
        return not (self.before() or self.after())
    
    def reader(self):
        while not self.stopped.is_set():
            if not self.Q.full():

                # the cap reached the video's end
                if self.cap_position >= self.end.frame_number:
                    self.stopped.set()
                    break

                # get the next frame's index
                self.cap_position = self.camera.get(CAP_PROP_POS_FRAMES)
                
                # the cap exceeded the portion of the video to be decoded
                if self.after():
                    self.stopped.set()
                    break          

                # grab the next frame
                grabbed = self.camera.grab()

                # sometimes, the frame is not grabbed... if this happens too
                # often, consider working with another video format.
                while not grabbed:
                    if not self.quiet:
                        sys.stderr.write(
                            ("Video file '{}': error while trying to grab"
                             " frame number {}... trying again\n").format(
                                self.fname, self.cap_position))
                    grabbed = self.camera.grab()

                # to circumvent a possible issue when trying to set the cap
                # with some compressed streams, the video is decoded and the
                # frames are skipped until the cap reaches self.beginning,
                # where they start to be added to the queue (self.Q)
                (grabbed, frame) = self.camera.retrieve()
                if not self.before(): self.Q.put(frame)
        self.camera.release()  
        self.released.set()
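
VideoStream.read() spins on Q.qsize() and bails out once the reader thread has died or been stopped, which avoids the classic deadlock of blocking forever on a queue whose producer is gone. A gentler variant of the same guard uses a blocking get with a timeout instead of busy-waiting (names below are illustrative):

from queue import Queue, Empty
from threading import Thread

frames = Queue(maxsize=128)

def reader():
    for i in range(3):
        frames.put('frame-%d' % i)     # stand-in for camera.retrieve()

t = Thread(target=reader, daemon=True)
t.start()

def read():
    # Block briefly instead of spinning; give up once the producer has
    # exited and the queue is drained.
    while True:
        try:
            return frames.get(timeout=0.1)
        except Empty:
            if not t.is_alive():
                return None

while True:
    frame = read()
    if frame is None:
        break
    print(frame)
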
Example #42
0
class AppManager(object):
    """
    An application manager takes responsibility for setting up the communication infrastructure and instantiates the
    ResourceManager, TaskManager and WFProcessor objects along with all their threads and processes. This is the Master object
    running in the main process and is designed to recover from errors in all other objects, threads and processes.

    :Arguments:
        :config_path: Url to config path to be read for AppManager
        :hostname: host rabbitmq server is running
        :port: port at which rabbitmq can be accessed
        :reattempts: number of attempts to re-invoke any failed EnTK components
        :resubmit_failed: resubmit failed tasks (True/False)
        :autoterminate: terminate resource reservation upon execution of all tasks of first workflow (True/False)
        :write_workflow: write workflow and mapping to rts entities to a file (post-termination)
        :rts: Specify RTS to use. Current options: 'mock', 'radical.pilot' (default if unspecified)
        :rmq_cleanup: Cleanup all queues created in RabbitMQ server for current execution (default is True)
        :rts_config: Configuration for the RTS, accepts {"sandbox_cleanup": True/False,"db_cleanup": True/False} when RTS is RP
        :name: Name of the Application. It should be unique between executions. (default is randomly assigned)
    """
    def __init__(self,
                 config_path=None,
                 hostname=None,
                 port=None,
                 reattempts=None,
                 resubmit_failed=None,
                 autoterminate=None,
                 write_workflow=None,
                 rts=None,
                 rmq_cleanup=None,
                 rts_config=None,
                 name=None):

        # Create a session for each EnTK script execution
        if name:
            self._name = name
            self._sid = name
        else:
            self._name = str()
            self._sid = ru.generate_id('re.session', ru.ID_PRIVATE)

        self._read_config(config_path, hostname, port, reattempts,
                          resubmit_failed, autoterminate, write_workflow, rts,
                          rmq_cleanup, rts_config)

        # Create an uid + logger + profiles for AppManager, under the sid
        # namespace
        path = os.getcwd() + '/' + self._sid
        self._uid = ru.generate_id('appmanager.%(item_counter)04d',
                                   ru.ID_CUSTOM,
                                   namespace=self._sid)
        self._logger = ru.Logger('radical.entk.%s' % self._uid,
                                 path=path,
                                 targets=['2', '.'])
        self._prof = ru.Profiler(name='radical.entk.%s' % self._uid, path=path)
        self._report = ru.Reporter(name='radical.entk.%s' % self._uid)

        self._report.info('EnTK session: %s\n' % self._sid)
        self._prof.prof('create amgr obj', uid=self._uid)
        self._report.info('Creating AppManager')

        self._resource_manager = None
        # RabbitMQ Queues
        self._pending_queue = list()
        self._completed_queue = list()

        # Global parameters to have default values
        self._mqs_setup = False
        self._resource_desc = None
        self._task_manager = None
        self._workflow = None
        self._cur_attempt = 1
        self._shared_data = list()

        self._rmq_ping_interval = os.getenv('RMQ_PING_INTERVAL', 10)

        self._logger.info('Application Manager initialized')
        self._prof.prof('amgr obj created', uid=self._uid)
        self._report.ok('>>ok\n')

    def _read_config(self, config_path, hostname, port, reattempts,
                     resubmit_failed, autoterminate, write_workflow, rts,
                     rmq_cleanup, rts_config):

        if not config_path:
            config_path = os.path.dirname(os.path.abspath(__file__))

        config = ru.read_json(os.path.join(config_path, 'config.json'))

        self._mq_hostname = hostname if hostname else str(config['hostname'])
        self._port = port if port else config['port']
        self._reattempts = reattempts if reattempts else config['reattempts']
        self._resubmit_failed = resubmit_failed if resubmit_failed is not None else config[
            'resubmit_failed']
        self._autoterminate = autoterminate if autoterminate is not None else config[
            'autoterminate']
        self._write_workflow = write_workflow if write_workflow is not None else config[
            'write_workflow']
        self._rts = rts if rts in ['radical.pilot', 'mock'] else str(
            config['rts'])
        self._rmq_cleanup = rmq_cleanup if rmq_cleanup is not None else config[
            'rmq_cleanup']
        self._rts_config = rts_config if rts_config is not None else config[
            'rts_config']

        self._num_pending_qs = config['pending_qs']
        self._num_completed_qs = config['completed_qs']

    # ------------------------------------------------------------------------------------------------------------------
    # Getter functions
    # ------------------------------------------------------------------------------------------------------------------

    @property
    def name(self):
        """
        Name for the application manager. Allows the user to set up the name of
        the application manager, as well as its session ID. This name should be
        unique between different EnTK executions; otherwise it will produce an
        error.

        :getter: Returns the name of the application manager
        :setter: Assigns the name of the application manager
        :type: String
        """

        return self._name

    @property
    def sid(self):
        """
        Get the session ID of the current EnTK execution

        :getter: Returns the session ID of the EnTK execution
        :type: String
        """

        return self._sid

    @property
    def resource_desc(self):
        """
        :getter: Returns the resource description
        :setter: Assigns a resource description
        """

        return self._resource_desc

    @property
    def workflow(self):
        """
        :getter: Return the workflow assigned for execution
        :setter: Assign workflow to be executed
        """

        return self._workflow

    @property
    def shared_data(self):
        """
        :getter: Return list of filenames that are shared between multiple tasks of the application
        :setter: Assign a list of names of files that need to be staged to the remote machine
        """

        return self._shared_data

    # ------------------------------------------------------------------------------------------------------------------
    # Setter functions
    # ------------------------------------------------------------------------------------------------------------------

    @name.setter
    def name(self, value):

        if not isinstance(value, str):
            raise TypeError(expected_type=str, actual_type=type(value))

        else:
            self._name = value

    @resource_desc.setter
    def resource_desc(self, value):

        if self._rts == 'radical.pilot':
            from radical.entk.execman.rp import ResourceManager
            self._resource_manager = ResourceManager(
                resource_desc=value,
                sid=self._sid,
                rts_config=self._rts_config)
        elif self._rts == 'mock':
            from radical.entk.execman.mock import ResourceManager
            self._resource_manager = ResourceManager(resource_desc=value,
                                                     sid=self._sid)

        self._report.info('Validating and assigning resource manager')

        if self._resource_manager._validate_resource_desc():
            self._resource_manager._populate()
            self._resource_manager.shared_data = self._shared_data
        else:
            self._logger.error('Could not validate resource description')
            raise EnTKError(text='Could not validate resource description')
        self._report.ok('>>ok\n')

    @workflow.setter
    def workflow(self, workflow):

        self._prof.prof('assigning workflow', uid=self._uid)

        for p in workflow:
            if not isinstance(p, Pipeline):
                self._logger.error('workflow type incorrect')
                raise TypeError(expected_type=['Pipeline', 'set of Pipelines'],
                                actual_type=type(p))

            p._validate()

        self._workflow = workflow
        self._logger.info('Workflow assigned to Application Manager')

    @shared_data.setter
    def shared_data(self, data):

        if not isinstance(data, list):
            data = [data]

        for val in data:
            if not isinstance(val, str):
                raise TypeError(expected_type=str, actual_type=type(val))

        if self._resource_manager:
            self._resource_manager.shared_data = data

    # ------------------------------------------------------------------------------------------------------------------
    # Public methods
    # ------------------------------------------------------------------------------------------------------------------

    def run(self):
        """
        **Purpose**: Run the application manager. Once the workflow and resource manager have been assigned. Invoking this
        method will start the setting up the communication infrastructure, submitting a resource request and then
        submission of all the tasks.
        """

        try:

            # Set None objects local to each run
            self._wfp = None
            self._sync_thread = None
            self._terminate_sync = Event()
            self._resubmit_failed = False
            self._cur_attempt = 1

            if not self._workflow:
                self._logger.error(
                    'No workflow assigned currently, please check your script')
                raise MissingError(obj=self._uid, missing_attribute='workflow')

            if not self._resource_manager:
                self._logger.error(
                    'No resource manager assigned currently, please create and add a valid resource manager'
                )
                raise MissingError(obj=self._uid,
                                   missing_attribute='resource_manager')

            self._prof.prof('amgr run started', uid=self._uid)

            # Setup rabbitmq stuff
            if not self._mqs_setup:

                self._report.info('Setting up RabbitMQ system')
                setup = self._setup_mqs()

                if not setup:
                    self._logger.error('RabbitMQ system not available')
                    raise EnTKError(text="RabbitMQ setup failed")

                self._mqs_setup = True

                self._report.ok('>>ok\n')

            # Create WFProcessor object
            self._prof.prof('creating wfp obj', uid=self._uid)
            self._wfp = WFprocessor(sid=self._sid,
                                    workflow=self._workflow,
                                    pending_queue=self._pending_queue,
                                    completed_queue=self._completed_queue,
                                    mq_hostname=self._mq_hostname,
                                    port=self._port,
                                    resubmit_failed=self._resubmit_failed)
            self._wfp._initialize_workflow()
            self._workflow = self._wfp.workflow

            # Submit a resource request if no allocation has been done yet,
            # or submit a new one if the old one has completed
            if self._resource_manager:
                res_alloc_state = self._resource_manager.get_resource_allocation_state(
                )
                if (not res_alloc_state) or (
                        res_alloc_state
                        in self._resource_manager.get_completed_states()):

                    self._logger.info('Starting resource request submission')
                    self._prof.prof('init rreq submission', uid=self._uid)
                    self._resource_manager._submit_resource_request()

            else:

                self._logger.error(
                    'Cannot run without resource manager, please create and assign a resource manager'
                )
                raise EnTKError(text='Missing resource manager')

            # Start synchronizer thread
            if not self._sync_thread:
                self._logger.info('Starting synchronizer thread')
                self._sync_thread = Thread(target=self._synchronizer,
                                           name='synchronizer-thread')
                self._prof.prof('starting synchronizer thread', uid=self._uid)
                self._sync_thread.start()

            # Start WFprocessor
            self._logger.info('Starting WFProcessor process from AppManager')
            self._wfp.start_processor()

            self._report.ok('All components created\n')

            # Create tmgr object only if it does not already exist
            if self._rts == 'radical.pilot':
                from radical.entk.execman.rp import TaskManager
            elif self._rts == 'mock':
                from radical.entk.execman.mock import TaskManager

            if not self._task_manager:
                self._prof.prof('creating tmgr obj', uid=self._uid)
                self._task_manager = TaskManager(
                    sid=self._sid,
                    pending_queue=self._pending_queue,
                    completed_queue=self._completed_queue,
                    mq_hostname=self._mq_hostname,
                    rmgr=self._resource_manager,
                    port=self._port)
                self._logger.info(
                    'Starting task manager process from AppManager')
                self._task_manager.start_manager()
                self._task_manager.start_heartbeat()

            active_pipe_count = len(self._workflow)
            finished_pipe_uids = []

            # We wait till all pipelines of the workflow are marked
            # complete
            while ((active_pipe_count > 0)
                   and (self._wfp.workflow_incomplete())
                   and (self._resource_manager.get_resource_allocation_state()
                        not in self._resource_manager.get_completed_states())):

                if active_pipe_count > 0:

                    for pipe in self._workflow:

                        with pipe.lock:

                            if (pipe.completed) and (
                                    pipe.uid not in finished_pipe_uids):

                                self._logger.info('Pipe %s completed' %
                                                  pipe.uid)
                                finished_pipe_uids.append(pipe.uid)
                                active_pipe_count -= 1
                                self._logger.info('Active pipes: %s' %
                                                  active_pipe_count)

                if (not self._sync_thread.is_alive()) and (self._cur_attempt <=
                                                           self._reattempts):

                    self._sync_thread = Thread(target=self._synchronizer,
                                               name='synchronizer-thread')
                    self._logger.info('Restarting synchronizer thread')
                    self._prof.prof('restarting synchronizer', uid=self._uid)
                    self._sync_thread.start()

                    self._cur_attempt += 1

                if (not self._wfp.check_processor()) and (self._cur_attempt <=
                                                          self._reattempts):
                    """
                    If WFP dies, both child threads are also cleaned out.
                    We simply recreate the wfp object with a copy of the workflow
                    in the appmanager and start the processor.
                    """

                    self._prof.prof('recreating wfp obj', uid=self._uid)
                    self._wfp = WFprocessor(
                        sid=self._sid,
                        workflow=self._workflow,
                        pending_queue=self._pending_queue,
                        completed_queue=self._completed_queue,
                        mq_hostname=self._mq_hostname,
                        port=self._port,
                        resubmit_failed=self._resubmit_failed)

                    self._logger.info(
                        'Restarting WFProcessor process from AppManager')
                    self._wfp.start_processor()

                    self._cur_attempt += 1

                if (not self._task_manager.check_heartbeat()) and (
                        self._cur_attempt <= self._reattempts):
                    """
                    If the tmgr process or heartbeat dies, we simply start a
                    new process using the start_manager method. We do not
                    need to create a new instance of the TaskManager object
                    itself. We stop and start a new instance of the
                    heartbeat thread as well.
                    """
                    self._prof.prof('restarting tmgr process and heartbeat',
                                    uid=self._uid)

                    self._logger.info('Terminating heartbeat thread')
                    self._task_manager.terminate_heartbeat()
                    self._logger.info('Terminating tmgr process')
                    self._task_manager.terminate_manager()
                    self._logger.info('Restarting task manager process')
                    self._task_manager.start_manager()
                    self._logger.info('Restarting heartbeat thread')
                    self._task_manager.start_heartbeat()

                    self._cur_attempt += 1

            self._prof.prof('start termination', uid=self._uid)

            # Terminate threads in following order: wfp, helper, synchronizer
            self._logger.info('Terminating WFprocessor')
            self._wfp.terminate_processor()

            self._logger.info('Terminating synchronizer thread')
            self._terminate_sync.set()
            self._sync_thread.join()
            self._logger.info('Synchronizer thread terminated')

            if self._autoterminate:
                self.resource_terminate()

            if self._write_workflow:
                write_workflow(self._workflow, self._sid)

            self._prof.prof('termination done', uid=self._uid)

        except KeyboardInterrupt:

            self._prof.prof('start termination', uid=self._uid)

            self._logger.error(
                'Execution interrupted by user (you probably hit Ctrl+C), ' +
                'trying to cancel enqueuer thread gracefully...')

            # Terminate threads in following order: wfp, helper, synchronizer
            if self._wfp:
                self._logger.info('Terminating WFprocessor')
                self._wfp.terminate_processor()

            if self._task_manager:
                self._logger.info('Terminating task manager process')
                self._task_manager.terminate_manager()
                self._task_manager.terminate_heartbeat()

            if self._sync_thread:
                self._logger.info('Terminating synchronizer thread')
                self._terminate_sync.set()
                self._sync_thread.join()
                self._logger.info('Synchronizer thread terminated')

            if self._resource_manager:
                self._resource_manager._terminate_resource_request()

            self._prof.prof('termination done', uid=self._uid)

            raise KeyboardInterrupt

        except Exception as ex:

            self._prof.prof('start termination', uid=self._uid)

            self._logger.exception('Error in AppManager: %s' % ex)

            # Terminate threads in following order: wfp, helper, synchronizer
            if self._wfp:
                self._logger.info('Terminating WFprocessor')
                self._wfp.terminate_processor()

            if self._task_manager:
                self._logger.info('Terminating task manager process')
                self._task_manager.terminate_manager()
                self._task_manager.terminate_heartbeat()

            if self._sync_thread:
                self._logger.info('Terminating synchronizer thread')
                self._terminate_sync.set()
                self._sync_thread.join()
                self._logger.info('Synchronizer thread terminated')

            if self._resource_manager:
                self._resource_manager._terminate_resource_request()

            self._prof.prof('termination done', uid=self._uid)
            raise
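
# A minimal sketch (not EnTK code) of the supervision idiom used in run()
# above: poll a worker thread with is_alive() and restart it a bounded number
# of times. `make_worker`, `stop`, and `max_restarts` are illustrative names.
from threading import Thread, Event


def supervise(make_worker, stop, max_restarts=3, poll=1.0):
    thread = make_worker()
    thread.start()
    restarts = 0
    while not stop.wait(poll):  # re-check the worker every `poll` seconds
        if not thread.is_alive() and restarts < max_restarts:
            thread = make_worker()  # dead worker: recreate and restart it
            thread.start()
            restarts += 1
    thread.join(timeout=5)

# Usage sketch: the owner calls stop.set() to end supervision.
# stop = Event()
# supervise(lambda: Thread(target=my_worker, daemon=True), stop)
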
class Discord:
    def __init__(self):
        pass

    channel_id = None  # enable dev mode on discord, right-click on the channel, copy ID
    bot_token = None  # get from the bot page. must be a bot, not a discord app
    gateway_url = "https://discordapp.com/api/gateway"
    postURL = None  # URL to post messages to, as the bot
    heartbeat_sent = 0
    heartbeat_interval = None
    last_sequence = None
    session_id = None
    web_socket = None  # WebSocket. Used for heartbeat.
    logger = logging  # Logger, uses default logging unless overridden
    headers = None  # Object containing the headers to send messages with.
    queue = []  # Message queue, stores messages until the bot reconnects.
    command = None  # Command parser
    status_callback = None  # The callback to use when the status changes.
    error_counter = 0  # The number of errors that have occurred.
    me = None  # The bot's ID.

    # Threads:
    manager_thread = None
    heartbeat_thread = None
    listener_thread = None

    # Events
    restart_event = Event()  # Set to restart discord bot.
    shutdown_event = Event()  # Set to stop all threads. Must also set restart_event.

    def configure_discord(self,
                          bot_token,
                          channel_id,
                          logger,
                          command,
                          status_callback=None):
        self.shutdown_event.clear()
        self.restart_event.clear()
        self.bot_token = bot_token
        self.channel_id = channel_id
        if logger:
            self.logger = logger
        self.command = command
        self.status_callback = status_callback
        self.error_counter = 0

        if self.status_callback:
            self.status_callback(connected="disconnected")

        if self.channel_id is None or len(
                self.channel_id) != CHANNEL_ID_LENGTH:
            self.logger.error(
                "Incorrectly configured: Channel ID must be %d chars long." %
                CHANNEL_ID_LENGTH)
            self.shutdown_discord()
            return
        if self.bot_token is None or len(self.bot_token) != BOT_TOKEN_LENGTH:
            self.logger.error(
                "Incorrectly configured: Bot Token must be %d chars long." %
                BOT_TOKEN_LENGTH)
            self.shutdown_discord()
            return

        self.postURL = "https://discordapp.com/api/channels/{}/messages".format(
            self.channel_id)
        self.headers = {
            "Authorization": "Bot {}".format(self.bot_token),
            "User-Agent": "myBotThing (http://some.url, v0.1)"
        }

        if not self.manager_thread:
            self.manager_thread = Thread(target=self.monitor_thread)
            self.manager_thread.start()
        else:
            self.restart_event.set()

    def monitor_thread(self):
        while not self.shutdown_event.is_set():
            try:
                socket_url = None

                if self.status_callback:
                    self.status_callback(connected="connecting")

                while not self.shutdown_event.is_set() and socket_url is None:
                    try:
                        r = requests.get(self.gateway_url,
                                         headers=self.headers)
                        socket_url = json.loads(r.content)['url']
                        self.logger.info("Socket URL is %s", socket_url)
                        break
                    except Exception as e:
                        self.logger.error("Failed to connect to gateway: %s",
                                          e)
                        time.sleep(5)
                        continue

                self.heartbeat_sent = 0
                self.web_socket = websocket.WebSocketApp(
                    socket_url,
                    on_message=self.on_message,
                    on_error=self.on_error,
                    on_close=self.on_close)

                self.listener_thread = Thread(
                    target=self.web_socket.run_forever,
                    kwargs={'ping_timeout': 1})
                self.listener_thread.start()
                self.logger.debug("WebSocket listener started")
                time.sleep(1)

                # Wait until we are told to restart
                self.restart_event.clear()
                self.restart_event.wait()
                self.logger.info("Restart Triggered")

            except Exception as e:
                self.logger.error(
                    "Exception occurred; catching, ignoring, and restarting: %s",
                    e)

            finally:
                if self.status_callback:
                    self.status_callback(connected="disconnected")

                # Clean up resources
                if self.web_socket:
                    try:
                        # Shutdown with status 4000 to prevent the session being closed.
                        self.web_socket.close(status=4000)
                    except websocket.WebSocketConnectionClosedException as e:
                        self.logger.error("Failed to close websocket: %s", e)
                    self.web_socket = None
                if self.listener_thread:
                    self.logger.info("Waiting for listener thread to join.")

                    self.listener_thread.join(timeout=60)
                    if self.listener_thread.is_alive():
                        self.logger.error(
                            "Listener thread has hung, leaking it now.")
                    else:
                        self.logger.info("Listener thread joined.")
                    self.listener_thread = None

    def shutdown_discord(self):
        self.logger.info("Shutdown has been triggered")
        self.shutdown_event.set()
        self.restart_event.set()

        if self.manager_thread:
            self.manager_thread.join(timeout=60)
            if self.manager_thread.is_alive():
                self.logger.error("Manager thread has hung, leaking it now.")
            else:
                self.logger.info("Manager thread joined.")
        self.manager_thread = None

        if self.heartbeat_thread:
            self.heartbeat_thread.join(timeout=60)
            if self.heartbeat_thread.is_alive():
                self.logger.error("HeartBeat thread has hung, leaking it now.")
            else:
                self.logger.info("HeartBeat thread joined.")
        self.heartbeat_thread = None

    def heartbeat(self):
        self.check_errors()
        while not self.shutdown_event.is_set():
            # Send heartbeat
            if self.heartbeat_sent > 1:
                self.logger.error(
                    "Haven't received a heartbeat ACK in a while")
                if self.status_callback:
                    self.status_callback(connected="disconnected")
                self.restart_event.set()
            elif self.web_socket:
                out = {'op': HEARTBEAT, 'd': self.last_sequence}
                js = json.dumps(out)
                try:
                    self.web_socket.send(js)
                    self.heartbeat_sent += 1
                    self.logger.info("Heartbeat: %s" % js)
                except Exception as exc:
                    self.logger.error("Exception caught: %s", unicode(exc))

            for i in range(self.heartbeat_interval // 1000):
                if not self.shutdown_event.is_set():
                    time.sleep(1)

    def on_message(self, message):
        js = json.loads(message)

        if js['op'] == HELLO:
            self.handle_hello(js)
        elif js['op'] == DISPATCH:
            self.handle_dispatch(js)
        elif js['op'] == HEARTBEAT_ACK:
            self.handle_heartbeat_ack()
        elif js['op'] == INVALID_SESSION:
            self.handle_invalid_session(js)
        else:
            self.logger.debug("Unhandled message: %s" % json.dumps(js))

    def handle_dispatch(self, js):
        if 's' in js and js['s'] is not None:
            self.last_sequence = js['s']

        data = js['d']
        dispatch_type = js['t']

        if not data or not dispatch_type:
            self.logger.debug("Invalid message type: %s" % json.dumps(js))
            return

        if dispatch_type == "READY":
            self.me = data['user']['id']
            return self.handle_ready(js)

        if dispatch_type == "RESUMED":
            self.logger.info("Successfully resumed")
            return

        self.logger.debug("Message was: %s" % json.dumps(js))

        if dispatch_type != "MESSAGE_CREATE":
            # Only care about message_create messages
            return

        if data['channel_id'] != self.channel_id:
            # Only care about messages from correct channel
            return

        user = data['author']['id']
        if self.me is not None and user == self.me:
            # Don't respond to ourself.
            return

        if 'attachments' in data:
            for upload in data['attachments']:
                filename = upload['filename']
                url = upload['url']
                snapshots, embeds = self.command.download_file(
                    filename, url, user)
                self.send(embeds=embeds)

        if 'content' in data and len(data['content']) > 0:
            snapshots, embeds = self.command.parse_command(
                data['content'], user)
            self.send(snapshots=snapshots, embeds=embeds)

    def handle_hello(self, js):
        self.logger.info("Received HELLO message")

        # Authenticate/Resume
        if self.session_id:
            self.send_resume()
        else:
            self.send_identify()

        # Setup heartbeat_interval
        self.heartbeat_interval = js['d']['heartbeat_interval']

        # Debug output status of heartbeat thread.
        self.logger.info("Heartbeat thread: %s", self.heartbeat_thread)
        if self.heartbeat_thread:
            self.logger.info("Heartbeat thread is_alive(): %s",
                             self.heartbeat_thread.is_alive())

        # Setup heartbeat_thread
        if not self.heartbeat_thread or not self.heartbeat_thread.is_alive():
            self.logger.info("Starting Heartbeat thread")
            self.heartbeat_thread = Thread(target=self.heartbeat)
            self.heartbeat_thread.start()

        # Signal that we are connected.
        self.process_queue()

    def send_identify(self):
        self.logger.info("Sending IDENTIFY message")
        out = {
            "op": IDENTIFY,
            "d": {
                "token": self.bot_token,
                "properties": {},
                "compress": False,
                "large_threshold": 250
            }
        }
        out_js = json.dumps(out)
        self.web_socket.send(out_js)

    def send_resume(self):
        self.logger.info("Sending RESUME message")
        out = {
            'op': RESUME,
            'd': {
                'seq': self.last_sequence,
                'session_id': self.session_id,
                'token': self.bot_token
            }
        }
        js = json.dumps(out)
        self.web_socket.send(js)

    def handle_heartbeat_ack(self):
        self.logger.info("Received HEARTBEAT_ACK message")
        if self.status_callback:
            self.status_callback(connected="connected")
        if self.error_counter > 0:
            self.error_counter -= 1
        self.heartbeat_sent = 0
        self.process_queue()

    def handle_ready(self, js):
        self.logger.info("Received READY message")
        self.last_sequence = js['s']
        self.session_id = js['d']['session_id']

    def handle_invalid_session(self, js):
        self.logger.info("Received INVALID_SESSION message")
        self.session_id = None
        self.last_sequence = None
        time.sleep(5)
        self.send_identify()

    def process_queue(self):
        while not self.shutdown_event.is_set() and len(self.queue):
            snapshot, embed = self.queue.pop()
            if self._dispatch_message(snapshot=snapshot, embed=embed):
                continue
            else:
                break

    def send(self, snapshots=None, embeds=None):
        if snapshots is not None:
            for snapshot in snapshots:
                if not self._dispatch_message(snapshot=snapshot):
                    return False
        if embeds is not None:
            for embed in embeds:
                if not self._dispatch_message(embed=embed):
                    return False
        return True

    def _dispatch_message(self, snapshot=None, embed=None):
        data = None
        files = []

        if embed is not None:
            json_str = json.dumps({'embed': embed.get_embed()})
            data = {"payload_json": json_str}
            for attachment in embed.get_files():
                attachment[1].seek(0)
                files.append(("attachment", attachment))

        if snapshot:
            snapshot[1].seek(0)
            files.append(("file", snapshot))

        if len(files) == 0:
            files = None

        if files is None and data is None:
            return False

        while True:
            try:
                r = requests.post(self.postURL,
                                  headers=self.headers,
                                  data=data,
                                  files=files)
                if r:
                    return True
            except Exception as e:
                self.logger.debug(
                    "Failed to send the message, exception occured: %s", e)
                self.error_counter += 1
                self.check_errors()
                self.queue_message(snapshot, embed)
                return False

            if int(r.status_code) == 429:  # HTTP 429: Too many requests.
                retry_after = int(r.headers['Retry-After'])
                time.sleep(retry_after / 1000)
                continue
            else:
                self.logger.error("Failed to send message:")
                self.logger.error("\tResponse: %s" % self.log_safe(r))
                self.logger.error("\tResponse Content: %s" %
                                  self.log_safe(r.content))
                self.logger.error("\tResponse Headers: %s" %
                                  self.log_safe(r.headers))
                self.logger.error("\tURL: %s" % self.log_safe(self.postURL))
                self.logger.error("\tHeaders: %s" %
                                  self.log_safe(self.headers))
                self.logger.error("\tData: %s" % data)
                self.logger.error("\tFiles: %s" % files)
                self.error_counter += 1
                self.check_errors()
                self.queue_message(snapshot, embed)
                return False

    def on_error(self, error):
        self.logger.error("Connection error: %s" % error)
        self.restart_event.set()

    def on_close(self):
        self.logger.info("WebSocket Closed")
        self.restart_event.set()

    def queue_message(self, snapshot, embed):
        if snapshot is not None or embed is not None:
            self.logger.info("Message queued")
            self.queue.append((snapshot, embed))

    def check_errors(self):
        if self.error_counter > MAX_ERRORS:
            # More than MAX_ERRORS errors in rapid succession;
            # best to shut down and let the user restart.
            self.logger.error(
                "Had %s/%s errors in rapid succession, this is a bad sign. "
                "Shutting down bot to avoid spam" %
                (self.error_counter, MAX_ERRORS))
            Thread(target=self.shutdown_discord).start()

            if self.status_callback:
                self.status_callback(connected="disconnected")

    def log_safe(self, message):
        return str(message).replace(self.bot_token, "[bot_token]").replace(
            self.channel_id, "[channel_id]")
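
# A condensed sketch of the shutdown idiom used by the Discord class above
# (generic names, not the bot's API): signal with an Event, join with a
# timeout, and use is_alive() to detect a hung thread instead of blocking.
import time
from threading import Thread, Event

shutdown = Event()

def heartbeat_loop(interval=1.0):
    while not shutdown.is_set():
        # ... send a heartbeat here ...
        shutdown.wait(interval)  # interruptible sleep

hb = Thread(target=heartbeat_loop, daemon=True)
hb.start()
time.sleep(3)
shutdown.set()
hb.join(timeout=5)
if hb.is_alive():
    print("heartbeat thread hung; leaking it")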
Example #44
0
class AsyncoreLoop(object):

    timer_resolution = 0.1  # used as the max interval to be in the io loop before returning to service timeouts

    _loop_dispatch_class = _AsyncorePipeDispatcher if os.name != 'nt' else _BusyWaitDispatcher

    def __init__(self):
        self._pid = os.getpid()
        self._loop_lock = Lock()
        self._started = False
        self._shutdown = False

        self._thread = None

        self._timers = TimerManager()

        dispatcher = None
        try:
            dispatcher = self._loop_dispatch_class()
            dispatcher.validate()
            log.debug("Validated loop dispatch with %s",
                      self._loop_dispatch_class)
        except Exception:
            log.exception(
                "Failed validating loop dispatch with %s. Using busy wait execution instead.",
                self._loop_dispatch_class)
            if dispatcher is not None:
                dispatcher.close()  # close the half-built dispatcher before falling back
            dispatcher = _BusyWaitDispatcher()
        self._loop_dispatcher = dispatcher

        atexit.register(partial(_cleanup, weakref.ref(self)))

    def maybe_start(self):
        should_start = False
        did_acquire = False
        try:
            did_acquire = self._loop_lock.acquire(False)
            if did_acquire and not self._started:
                self._started = True
                should_start = True
        finally:
            if did_acquire:
                self._loop_lock.release()

        if should_start:
            self._thread = Thread(target=self._run_loop,
                                  name="cassandra_driver_event_loop")
            self._thread.daemon = True
            self._thread.start()

    def wake_loop(self):
        self._loop_dispatcher.notify_loop()

    def _run_loop(self):
        log.debug("Starting asyncore event loop")
        with self._loop_lock:
            while not self._shutdown:
                try:
                    self._loop_dispatcher.loop(self.timer_resolution)
                    self._timers.service_timeouts()
                except Exception:
                    log.debug("Asyncore event loop stopped unexepectedly",
                              exc_info=True)
                    break
            self._started = False

        log.debug("Asyncore event loop ended")

    def add_timer(self, timer):
        self._timers.add_timer(timer)

        # This function is called from a different thread than the event loop
        # thread, so for this call to be thread safe, we must wake up the loop
        # in case it's stuck at a select
        self.wake_loop()

    def _cleanup(self):
        global _dispatcher_map

        self._shutdown = True
        if not self._thread:
            return

        log.debug("Waiting for event loop thread to join...")
        self._thread.join(timeout=1.0)
        if self._thread.is_alive():
            log.warning(
                "Event loop thread could not be joined, so shutdown may not be clean. "
                "Please call Cluster.shutdown() to avoid this.")

        log.debug("Event loop thread was joined")

        # Ensure all connections are closed and in-flight requests cancelled
        for conn in tuple(_dispatcher_map.values()):
            if conn is not self._loop_dispatcher:
                conn.close()
        self._timers.service_timeouts()
        # Once all the connections are closed, close the dispatcher
        self._loop_dispatcher.close()

        log.debug("Dispatchers were closed")
def start_self_test(ns):
    from threading import Thread
    import time
    import sys

    class NullWriter(object):
        def write(self, arg):
            pass

    ns['1'] = ns['2'] = 'mm'
    (collection1, collection2) = get_collections('mm', 'mm', ns)
    (collection3, collection4) = get_collections('mm', 'mm', ns)

    collection2.options.mongo6050_enabled = False

    oldstdout = sys.stdout

    def tester_thread(c1, c2):
        test_forever(collection1=c1,
                     collection2=c2,
                     seed=random.random(),
                     update_tests_enabled=True,
                     sorting_tests_enabled=True,
                     indexes_enabled=False,
                     projections_enabled=True,
                     verbose=False)

    t1 = Thread(target=tester_thread, args=(collection1, collection2))
    t1.daemon = True

    t2 = Thread(target=tester_thread, args=(collection3, collection4))
    t2.daemon = True

    sys.stdout = NullWriter()

    t1.start()

    for _ in range(5):
        time.sleep(1)
        if not t1.is_alive():
            sys.stdout = oldstdout
            print('SUCCESS: Test harness found artificial bug')
            break

    sys.stdout = oldstdout

    if t1.is_alive():
        print('FAILURE: Test harness did not find obvious artificial bug in 5 seconds')

    sys.stdout = NullWriter()

    t2.start()

    for _ in range(5):
        time.sleep(1)
        if not t2.is_alive():
            sys.stdout = oldstdout
            print('FAILURE: Test of model vs. itself did not match')
            return

    sys.stdout = oldstdout

    print('SUCCESS: Model was consistent with itself')
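
# The harness above infers failure from the tester thread exiting (is_alive()
# going False). A stricter variant (sketch with illustrative names) captures
# the worker's exception so the main thread can tell a crash from success.
import time
from threading import Thread

def run_captured(fn, *args):
    result = {'error': None}

    def wrapper():
        try:
            fn(*args)
        except Exception as e:
            result['error'] = e  # record the failure instead of dying silently

    t = Thread(target=wrapper, daemon=True)
    t.start()
    return t, result

def flaky():
    time.sleep(1)
    raise ValueError('artificial bug')

t, result = run_captured(flaky)
while t.is_alive():
    time.sleep(0.1)
print('worker error:', result['error'])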

import time
from threading import Thread


def countdown(n):
    while n > 0:
        print('T-minus', n)
        n -= 1
        time.sleep(1)


t = Thread(target=countdown, args=(5, ), daemon=True)
t.start()

while True:
    print("running")
    if t.is_alive():
        print("Still alive")
    else:
        print("Dead")
        break
    time.sleep(1)


class CountDownTask:
    def __init__(self):
        self._running = True

    def terminate(self):
        self._running = False

    def run(self, n):
        # Check the flag each iteration so terminate() can stop the loop.
        while self._running and n > 0:
            print('T-minus', n)
            n -= 1
            time.sleep(1)
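
# A minimal usage sketch for the class above: run the countdown on a thread,
# stop it cooperatively, and confirm via is_alive() that it actually exited.
c = CountDownTask()
t = Thread(target=c.run, args=(10, ))
t.start()
time.sleep(3)
c.terminate()  # signal the loop to stop
t.join()       # wait for the thread to finish
assert not t.is_alive()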
Example #47
0
File: cli.py Project: ur1katz/catt
def cast(settings, video_url, subtitle, force_default, random_play, no_subs):
    controller = "default" if force_default else None
    cst, stream = setup_cast(settings["device"],
                             video_url=video_url,
                             prep="app",
                             controller=controller)

    if stream.is_local_file:
        click.echo("Casting local file %s..." % video_url)
        click.echo("Playing %s on \"%s\"..." %
                   (stream.video_title, cst.cc_name))
        if subtitle is None and no_subs:
            subtitle_url = None
        else:
            subtitle_url = load_subtitle_if_exists(subtitle, video_url,
                                                   stream.local_ip,
                                                   stream.port + 1)

        thr = Thread(target=serve_file,
                     args=(video_url, stream.local_ip, stream.port,
                           stream.guessed_content_type))

        thr.daemon = True
        thr.start()
        cst.play_media_url(stream.video_url,
                           content_type=stream.guessed_content_type,
                           title=stream.video_title,
                           subtitles=subtitle_url)
        click.echo("Serving local file, press Ctrl+C when done.")
        while thr.is_alive():
            time.sleep(1)

    elif stream.is_playlist:
        if stream.playlist_length == 0:
            cst.kill(idle_only=True)
            raise CattCliError("Playlist is empty.")
        click.echo("Casting remote playlist %s..." % video_url)
        if random_play:
            stream.set_playlist_entry(
                random.randrange(0, stream.playlist_length))
        else:
            try:
                if not stream.playlist_all_ids:
                    raise ValueError
                cst.play_playlist(stream.playlist_all_ids)
                return
            except (PlaybackError, ValueError):
                warning("Playlist playback not possible, playing first video.")
                stream.set_playlist_entry(0)
        click.echo("Playing %s on \"%s\"..." %
                   (stream.playlist_entry_title, cst.cc_name))
        if cst.info_type == "url":
            cst.play_media_url(stream.playlist_entry_url,
                               title=stream.playlist_entry_title,
                               thumb=stream.playlist_entry_thumbnail,
                               content_type=stream.guessed_content_type)
        elif cst.info_type == "id":
            cst.play_media_id(stream.playlist_entry_id)

    else:
        click.echo("Casting remote file %s..." % video_url)
        click.echo("Playing %s on \"%s\"..." %
                   (stream.video_title, cst.cc_name))
        if cst.info_type == "url":
            cst.play_media_url(stream.video_url,
                               title=stream.video_title,
                               thumb=stream.video_thumbnail,
                               content_type=stream.guessed_content_type)
        elif cst.info_type == "id":
            cst.play_media_id(stream.video_id)
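
# A small sketch of the wait idiom used above (generic names, not catt's
# API): polling is_alive() with short sleeps keeps the main thread free to
# handle Ctrl+C promptly (an untimed join() has historically delayed signal
# delivery on some Python versions).
import time
from threading import Thread

def serve_once(duration=3.0):
    time.sleep(duration)  # stand-in for serving a file over HTTP

thr = Thread(target=serve_once, daemon=True)
thr.start()
try:
    while thr.is_alive():
        time.sleep(1)
except KeyboardInterrupt:
    pass  # the daemon thread dies with the process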
Example #48
0
class StupidNode:
    pubkey = privkey = None
    channel = ""  # subscription filter or something (I think)
    PORTS = 4  # as we add or remove ports, make sure this is the number of ports a StupidNode uses

    def __init__(self, endpoint="*", identity=None, keyring=DEFAULT_KEYRING):
        self.keyring = keyring
        self.endpoint = (endpoint if isinstance(endpoint, Endpoint) else
                         Endpoint(endpoint))
        self.endpoints = list()
        self.identity = identity or f"{gethostname()}-{self.endpoint.pub}"
        self.log = logging.getLogger(f"{self.identity}")

        self.log.debug("begin node setup / creating context")

        self.ctx = zmq.Context()
        self.cleartext_ctx = zmq.Context()

        self.start_auth()

        self.log.debug("creating sockets")

        self.pub = self.mk_socket(zmq.PUB)
        self.router = self.mk_socket(zmq.ROUTER)
        self.router.router_mandatory = 1  # one of the few opts that can be set after bind()
        self.rep = self.mk_socket(zmq.REP, enable_curve=False)

        self.sub = list()
        self.dealer = list()

        self.log.debug("binding sockets")

        self.bind(self.pub)
        self.bind(self.router)
        self.bind(self.rep, enable_curve=False)

        self.log.debug("registering polling")

        self.poller = zmq.Poller()
        self.poller.register(self.router, zmq.POLLIN)

        self.log.debug("configuring interrupt signal")
        signal.signal(signal.SIGINT, self.interrupt)

        self.log.debug("configuring WAI Reply Thread")
        self._who_are_you_thread = Thread(
            target=self.who_are_you_reply_machine)
        self._who_are_you_continue = True
        self._who_are_you_thread.start()

        self.route_queue = deque(list(), ROUTE_QUEUE_LEN)
        self.routes = dict()

        self.log.debug("node setup complete")

    def who_are_you_reply_machine(self):
        while self._who_are_you_continue:
            if self.rep.poll(200):
                self.log.debug("wai polled, trying to recv")
                msg = self.rep.recv()
                ttype = zmq_socket_type_name(self.rep)
                self.log.debug('received "%s" over %s socket', msg, ttype)
                msg = [self.identity.encode(), self.pubkey]
                self.log.debug('sending "%s" as reply over %s socket', msg,
                               ttype)
                self.rep.send_multipart(msg)
        self.log.debug("wai thread seems finished, loop broken")

    def start_auth(self):
        self.log.debug("starting auth thread")
        self.auth = ThreadAuthenticator(self.ctx)
        self.auth.start()
        self.auth.allow("127.0.0.1")
        self.auth.configure_curve(domain="*", location=self.keyring)
        self.load_or_create_key()

    @property
    def key_basename(self):
        return scrub_identity_name_for_certfile(self.identity)

    @property
    def key_filename(self):
        return os.path.join(self.keyring, self.key_basename + ".key")

    @property
    def secret_key_filename(self):
        return self.key_filename + "_secret"

    def load_key(self):
        self.log.debug("loading node key-pair")
        self.pubkey, self.privkey = zmq.auth.load_certificate(
            self.secret_key_filename)

    def load_or_create_key(self):
        try:
            self.load_key()
        except IOError as e:
            self.log.debug("error loading key: %s", e)
            self.log.debug("creating node key-pair")
            os.makedirs(self.keyring, mode=0o0700, exist_ok=True)
            zmq.auth.create_certificates(self.keyring, self.key_basename)
            self.load_key()

    def preprocess_message(self, msg, msg_class=TaggedMessage):
        if not isinstance(msg, msg_class):
            if not isinstance(msg, (list, tuple)):
                msg = (msg, )
            msg = msg_class(*msg, name=self.identity)
        rmsg = repr(msg)
        emsg = msg.encode()
        return msg, rmsg, emsg

    def route_failed(self, msg):
        if not isinstance(msg, RoutedMessage):
            raise TypeError("msg must already be a RoutedMessage")
        msg.failures += 1
        if msg.failures <= 5:
            self.log.debug("(re)queueing %s for later delivery", repr(msg))
            if len(self.route_queue) == self.route_queue.maxlen:
                self.log.error("route_queue full, discarding %s",
                               repr(self.route_queue[0]))
            self.route_queue.append(msg)
        else:
            self.log.error("discarding %s after %d failures", repr(msg),
                           msg.failures)

    def route_message(self, to, msg):
        if isinstance(to, StupidNode):
            to = to.identity
        if isinstance(to, (list, tuple)):
            to = to[-1]
        R = self.routes.get(to)
        if R:
            to = (R[0], to)
        if isinstance(msg, RoutedMessage):
            msg.to = to
        else:
            # preprocess passes *msg to msg_class() -- ie, RoutedMessage(to, *msg)
            if isinstance(msg, list):
                msg = tuple(msg)
            elif not isinstance(msg, tuple):
                msg = (msg, )
            msg = (to, ) + msg
        tmsg, rmsg, emsg = self.preprocess_message(msg,
                                                   msg_class=RoutedMessage)
        self.log.debug("routing message %s -- encoding: %s", rmsg, emsg)
        try:
            self.router.send_multipart(emsg)
        except zmq.error.ZMQError as zmq_e:
            self.log.debug("route to %s failed: %s", to, zmq_e)
            if "Host unreachable" not in str(zmq_e):
                raise
            self.route_failed(tmsg)

    def deal_message(self, msg):
        self.log.debug(
            "dealing message (actually publishing with no_publish=True)")
        self.publish_message(msg, no_publish=True)

    def publish_message(self,
                        msg,
                        no_deal=False,
                        no_deal_to=None,
                        no_publish=False):
        tmsg, rmsg, emsg = self.preprocess_message(msg)
        self.log.debug(
            "publishing message %s no_publish=%s, no_deal=%s, no_deal_to=%s",
            rmsg,
            no_publish,
            no_deal,
            no_deal_to,
        )
        self.local_workflow(tmsg)
        if not no_publish:
            self.pub.send_multipart(emsg)
        if no_deal:
            return
        if no_deal_to is None:
            ok_send = lambda x: True
        elif callable(no_deal_to):
            ok_send = no_deal_to
        elif isinstance(no_deal_to, zmq.Socket):
            npt_i = self.dealer.index(no_deal_to)
            ok_send = lambda x: x != npt_i
        elif isinstance(no_deal_to, int):
            ok_send = lambda x: x != no_deal_to
        elif isinstance(no_deal_to, (list, tuple)):
            ok_send = lambda x: x not in no_deal_to
        for i, sock in enumerate(self.dealer):
            if ok_send(i):
                self.log.debug("dealing message %s to %s", rmsg,
                               self.endpoints[i])
                sock.send_multipart(emsg)
            else:
                self.log.debug("not sending %s to %s", rmsg, self.endpoints[i])

    def mk_socket(self, stype, enable_curve=True):
        # defaults:
        # socket.setsockopt(zmq.LINGER, -1) # infinite
        # socket.setsockopt(zmq.IDENTITY, None)
        # socket.setsockopt(zmq.TCP_KEEPALIVE, -1)
        # socket.setsockopt(zmq.TCP_KEEPALIVE_INTVL, -1)
        # socket.setsockopt(zmq.TCP_KEEPALIVE_CNT, -1)
        # socket.setsockopt(zmq.TCP_KEEPALIVE_IDLE, -1)
        # socket.setsockopt(zmq.RECONNECT_IVL, 100)
        # socket.setsockopt(zmq.RECONNECT_IVL_MAX, 0) # 0 := always use IVL

        # the above can be accessed as attributes instead (they are case
        # insensitive, we choose lower case below so it looks like boring
        # python)

        if enable_curve:
            socket = self.ctx.socket(stype)
            self.log.debug("create %s socket in crypto context",
                           zmq_socket_type_name(stype))
        else:
            socket = self.cleartext_ctx.socket(stype)
            self.log.debug("create %s socket in cleartext context",
                           zmq_socket_type_name(stype))

        socket.linger = 1
        socket.identity = self.identity.encode()
        socket.reconnect_ivl = 1000
        socket.reconnect_ivl_max = 10000

        if enable_curve:
            socket.curve_secretkey = self.privkey
            socket.curve_publickey = self.pubkey

        return socket

    def local_workflow(self, msg):
        self.log.debug("start local_workflow %s", repr(msg))
        msg = self.local_react(msg)
        if msg:
            msg = self.all_react(msg)
        return msg

    def sub_workflow(self, socket):
        idx = self.sub.index(socket)
        enp = self.endpoints[idx]
        msg = self.sub_receive(socket, idx)
        self.log.debug("start sub_workflow (idx=%d -> endpoint=%s) %s", idx,
                       enp, repr(msg))
        for react in (self.sub_react, self.nonlocal_react, self.all_react):
            if msg:
                msg = react(msg, idx=idx)
        self.log.debug("end sub_workflow")
        return msg

    def router_workflow(self):
        msg = self.router_receive()
        self.log.debug("start router_workflow %s", repr(msg))
        for react in (self.router_react, self.nonlocal_react, self.all_react):
            if not msg:
                break
            msg = react(msg)
        self.log.debug("end router_workflow")
        return msg

    def dealer_workflow(self, socket):
        idx = self.dealer.index(socket)
        enp = self.endpoints[idx]
        msg = self.dealer_receive(socket, idx)
        self.log.debug("start deal_workflow (idx=%d -> endpoint=%s) %s", idx,
                       enp, repr(msg))
        for react in (self.dealer_react, self.nonlocal_react, self.all_react):
            if not msg:
                break
            msg = react(msg, idx=idx)
        self.log.debug("end deal_workflow")
        return msg

    def sub_receive(self, socket, idx):  # pylint: disable=unused-argument
        return TaggedMessage(*socket.recv_multipart())

    def dealer_receive(self, socket, idx):  # pylint: disable=unused-argument
        msg = socket.recv_multipart()
        rm = RoutedMessage.decode(msg)
        if rm:
            return rm
        # dealers always receive routed messages; if it doesn't appear to be
        # routed, then it's simply intended for us. In that case, build a
        # tagged message and mark it as non-publish.
        msg = TaggedMessage(*msg)
        msg.publish_mark = False
        return msg

    def router_receive(self):
        # we ignore the source ID (in '_') and just believe the msg.tag.name ... it's
        # roughly the same thing anyway
        _, *msg = self.router.recv_multipart()
        rm = RoutedMessage.decode(msg)
        if rm:
            return rm
        return TaggedMessage(*msg)

    def all_react(self, msg, idx=None):  # pylint: disable=unused-argument
        return msg

    def sub_react(self, msg, idx=None):  # pylint: disable=unused-argument
        return msg

    def dealer_react(self, msg, idx=None):  # pylint: disable=unused-argument
        return msg

    def router_react(self, msg):
        return msg

    def nonlocal_react(self, msg, idx=None):
        if isinstance(msg, RoutedMessage):
            msg = self.routed_react(msg, idx=idx)
        return msg

    def local_react(self, msg):
        return msg

    def routed_react(self, msg, idx=None):  # pylint: disable=unused-argument
        return False

    def poll(self, timeo=500, other_cb=None):
        """Check to see if there's any incoming messages. If anything seems ready to receive,
        invoke the related workflow or invoke other_cb (if given) on the socket item.
        """
        items = dict(self.poller.poll(timeo))
        ret = list()
        for item in items:
            if items[item] != zmq.POLLIN:
                continue
            if item in self.sub:
                res = self.sub_workflow(item)
            elif item in self.dealer:
                res = self.dealer_workflow(item)
            elif item is self.router:
                res = self.router_workflow()
            elif callable(other_cb):
                res = other_cb(item)
            else:
                res = None
                if False and isinstance(item, zmq.Socket):
                    self.log.error(
                        "no workflow defined for socket of type %s -- received: %s",
                        zmq_socket_type_name(item),
                        item.recv_multipart(),
                    )
                else:
                    self.log.error(
                        "no workflow defined for socket of type %s -- regarding as fatal",
                        zmq_socket_type_name(item),
                    )
                    # note: this normally doesn't trigger an exit... thanks threading
                    raise Exception("unhandled poll item")
            if isinstance(res, TaggedMessage):
                ret.append(res)
        return ret

    def interrupt(self, signo, eframe):  # pylint: disable=unused-argument
        print(" kaboom")
        self.closekill()
        sys.exit(0)

    def closekill(self):
        if hasattr(self, "auth") and self.auth is not None:
            if self.auth.is_alive():
                self.log.debug("trying to stop auth thread")
                self.auth.stop()
                self.log.debug("auth thread seems to have stopped")
            del self.auth

        if hasattr(self, "_who_are_you_thread"):
            if self._who_are_you_thread.is_alive():
                self.log.debug("WAI Thread seems to be alive, trying to join")
                self._who_are_you_continue = False
                self._who_are_you_thread.join()
                self.log.debug("WAI Thread seems to jave joined us.")
            del self._who_are_you_thread

        if hasattr(self, "cleartext_ctx"):
            self.log.debug("destroying cleartext context")
            self.cleartext_ctx.destroy(1)
            del self.cleartext_ctx

        if hasattr(self, "ctx"):
            self.log.debug("destroying crypto context")
            self.ctx.destroy(1)
            del self.ctx

    def __del__(self):
        self.log.debug("%s is being deleted", self)
        self.closekill()

    def bind(self, socket, enable_curve=True):
        if enable_curve:
            socket.curve_server = True  # must come before bind
        try:
            f = self.endpoint.format(socket.type)
            socket.bind(f)
        except zmq.ZMQError as e:
            raise zmq.ZMQError(f"unable to bind {f}: {e}") from e

    def who_are_you_request(self, endpoint):
        req = self.mk_socket(zmq.REQ, enable_curve=False)
        req.connect(endpoint.format(zmq.REQ))
        msg = b"Who are you?"
        self.log.debug("sending cleartext request: %s", msg)
        req.send(msg)
        self.log.debug("waiting for reply")
        res = req.recv_multipart()
        self.log.debug("received reply: %s", res)
        req.close()
        if len(res) == 2:
            return res
        return None, None

    def pubkey_pathname(self, node_id):
        if isinstance(node_id, Endpoint):
            node_id = node_id.host
        fname = scrub_identity_name_for_certfile(node_id) + ".key"
        pname = os.path.join(self.keyring, fname)
        return pname

    def learn_or_load_endpoint_pubkey(self, endpoint):
        epubk_pname = self.pubkey_pathname(endpoint)
        if not os.path.isfile(epubk_pname):
            self.log.debug(
                "%s does not exist yet, trying to learn certificate",
                epubk_pname)
            node_id, public_key = self.who_are_you_request(endpoint)
            if node_id:
                endpoint.identity = node_id.decode()
                epubk_pname = self.pubkey_pathname(node_id)
                if not os.path.isfile(epubk_pname):
                    with open(epubk_pname, "wb") as fh:
                        fh.write(
                            b"# generated via rep/req pubkey transfer\n\n")
                        fh.write(b"metadata\n")
                        # NOTE: in zmq/auth/certs.py's _write_key_file,
                        # metadata should be key-value pairs; roughly like the
                        # following (although with their particular py2/py3
                        # neurosis edited out):
                        #
                        # f.write('metadata\n')
                        #     for k,v in metadata.items():
                        #         f.write(f"    {k} = {v}\n")
                        fh.write(b"curve\n")
                        fh.write(b'    public-key = "')
                        fh.write(public_key)
                        fh.write(b'"')
        self.log.debug("loading certificate %s", epubk_pname)
        ret, _ = zmq.auth.load_certificate(epubk_pname)
        return ret

    def connect_to_endpoints(self, *endpoints):
        self.log.debug("connecting remote endpoints")
        for item in endpoints:
            self.connect_to_endpoint(item)
        self.log.debug("remote endpoints connected")
        return self

    def _create_connected_socket(self,
                                 endpoint,
                                 stype,
                                 pubkey,
                                 preconnect=None):
        self.log.debug("creating %s socket to endpoint=%s",
                       zmq_socket_type_name(stype), endpoint)
        s = self.mk_socket(stype)
        s.curve_serverkey = pubkey
        if callable(preconnect):
            preconnect(s)
        s.connect(endpoint.format(stype))
        return s

    def connect_to_endpoint(self, endpoint):
        if isinstance(endpoint, StupidNode):
            endpoint = endpoint.endpoint

        elif not isinstance(endpoint, Endpoint):
            endpoint = Endpoint(endpoint)

        self.log.debug("learning or loading endpoint=%s pubkey", endpoint)
        epk = self.learn_or_load_endpoint_pubkey(endpoint)

        sos = lambda s: s.setsockopt_string(zmq.SUBSCRIBE, self.channel)
        sub = self._create_connected_socket(endpoint, zmq.SUB, epk, sos)
        self.poller.register(sub, zmq.POLLIN)
        self.sub.append(sub)

        deal = self._create_connected_socket(endpoint, zmq.DEALER, epk)
        self.poller.register(deal, zmq.POLLIN)
        self.dealer.append(deal)

        self.endpoints.append(endpoint)

        return self

    def __repr__(self):
        return f"{self.__class__.__name__}({self.identity})"
Example #49
0
def main():
    # Patch threading to make exceptions catchable.
    install_thread_excepthook()

    # Make sure exceptions get logged.
    sys.excepthook = handle_exception

    args = get_args()

    set_log_and_verbosity(log)

    # Abort if only-server and no-server are used together.
    if args.only_server and args.no_server:
        log.critical(
            "You can't use no-server and only-server at the same time, silly.")
        sys.exit(1)

    # Abort if status name is not valid.
    regexp = re.compile(r'^([\w\s\-.]+)$')
    if not regexp.match(args.status_name):
        log.critical('Status name contains illegal characters.')
        sys.exit(1)

    # Stop if we're just looking for a debug dump.
    if args.dump:
        log.info('Retrieving environment info...')
        hastebin_id = get_debug_dump_link()
        log.info('Done! Your debug link: https://hastebin.com/%s.txt',
                 hastebin_id)
        sys.exit(1)

    # Let's not forget to run Grunt; only needed when running with the webserver.
    if not args.no_server and not validate_assets(args):
        sys.exit(1)

    if args.no_version_check and not args.only_server:
        log.warning('You are running RocketMap in No Version Check mode. '
                    "If you don't know what you're doing, this mode "
                    'can have negative consequences, and you will not '
                    'receive support running in NoVC mode. '
                    'You have been warned.')

    position = extract_coordinates(args.location)

    # Use the latitude and longitude to get the local altitude from Google.
    (altitude, status) = get_gmaps_altitude(position[0], position[1],
                                            args.gmaps_key)
    if altitude is not None:
        log.debug('Local altitude is: %sm.', altitude)
        position = (position[0], position[1], altitude)
    else:
        if status == 'REQUEST_DENIED':
            log.error(
                'Google API Elevation request was denied. You probably ' +
                'forgot to enable elevation api in https://console.' +
                'developers.google.com/apis/api/elevation_backend/')
            sys.exit()
        else:
            log.error('Unable to retrieve altitude from Google APIs, ' +
                      'setting to 0.')

    log.info('Parsed location is: %.4f/%.4f/%.4f (lat/lng/alt).', position[0],
             position[1], position[2])

    # Scanning toggles.
    log.info('Parsing of Pokemon %s.',
             'disabled' if args.no_pokemon else 'enabled')
    log.info('Parsing of Pokestops %s.',
             'disabled' if args.no_pokestops else 'enabled')
    log.info('Parsing of Gyms %s.', 'disabled' if args.no_gyms else 'enabled')
    log.info('Pokemon encounters %s.',
             'enabled' if args.encounter else 'disabled')
    log.info('Parsing of weather cells %s.',
             'disabled' if args.no_weather_cells else 'enabled')

    app = None
    if not args.no_server and not args.clear_db:
        app = Pogom(__name__,
                    root_path=os.path.dirname(
                        os.path.abspath(__file__)).decode('utf8'))
        app.before_request(app.validate_request)
        app.set_current_location(position)

    db = startup_db(app, args.clear_db)

    args.root_path = os.path.dirname(os.path.abspath(__file__))

    # Control the search status (running or not) across threads.
    control_flags = {
        'on_demand': Event(),
        'api_watchdog': Event(),
        'search_control': Event()
    }

    for flag in control_flags.values():
        flag.clear()

    if args.on_demand_timeout > 0:
        control_flags['on_demand'].set()

    heartbeat = [now()]

    # Setup the location tracking queue and push the first location on.
    new_location_queue = Queue()
    new_location_queue.put(position)

    # DB Updates
    db_updates_queue = Queue()

    # Thread(s) to process database updates.
    for i in range(args.db_threads):
        log.debug('Starting db-updater worker thread %d', i)
        t = Thread(target=db_updater,
                   name='db-updater-{}'.format(i),
                   args=(db_updates_queue, db))
        t.daemon = True
        t.start()

    # Database cleaner; really only need one ever.
    if args.enable_clean:
        t = Thread(target=clean_db_loop, name='db-cleaner', args=(args, ))
        t.daemon = True
        t.start()

    # WH updates queue & WH unique key LFU caches.
    # The LFU caches will stop the server from resending the same data an
    # infinite number of times. The caches will be instantiated in the
    # webhook's startup code.
    wh_updates_queue = Queue()
    wh_key_cache = {}

    if not args.wh_types:
        log.info('Webhook disabled.')
    else:
        log.info('Webhook enabled for events: sending %s to %s.',
                 args.wh_types, args.webhooks)

        # Thread to process webhook updates.
        for i in range(args.wh_threads):
            log.debug('Starting wh-updater worker thread %d', i)
            t = Thread(target=wh_updater,
                       name='wh-updater-{}'.format(i),
                       args=(args, wh_updates_queue, wh_key_cache))
            t.daemon = True
            t.start()

    if not args.only_server:
        # Speed limit.
        log.info(
            'Scanning speed limit %s.',
            'set to {} km/h'.format(args.kph) if args.kph > 0 else 'disabled')
        log.info(
            'High-level speed limit %s.', 'set to {} km/h'.format(
                args.hlvl_kph) if args.hlvl_kph > 0 else 'disabled')

        # Check if we are able to scan.
        if not can_start_scanning(args):
            sys.exit(1)

        initialize_proxies(args)

        # Monitor files, update data if they've changed recently.
        # Keys are 'args' object keys, values are filenames to load.
        files_to_monitor = {}

        if args.encounter:
            files_to_monitor['enc_whitelist'] = args.enc_whitelist_file
            log.info('Encounters are enabled.')
        else:
            log.info('Encounters are disabled.')

        if args.webhook_blacklist_file:
            files_to_monitor['webhook_blacklist'] = args.webhook_blacklist_file
            log.info('Webhook blacklist is enabled.')
        elif args.webhook_whitelist_file:
            files_to_monitor['webhook_whitelist'] = args.webhook_whitelist_file
            log.info('Webhook whitelist is enabled.')
        else:
            log.info('Webhook whitelist/blacklist is disabled.')

        if files_to_monitor:
            t = Thread(target=dynamic_loading_refresher,
                       name='dynamic-enclist',
                       args=(files_to_monitor, ))
            t.daemon = True
            t.start()
            log.info('Dynamic list refresher is enabled.')
        else:
            log.info('Dynamic list refresher is disabled.')

        # Update player locale if not set correctly yet.
        args.player_locale = PlayerLocale.get_locale(args.location)
        if not args.player_locale:
            args.player_locale = gmaps_reverse_geolocate(
                args.gmaps_key, args.locale,
                str(position[0]) + ', ' + str(position[1]))
            db_player_locale = {
                'location': args.location,
                'country': args.player_locale['country'],
                'language': args.player_locale['language'],
                'timezone': args.player_locale['timezone'],
            }
            db_updates_queue.put((PlayerLocale, {0: db_player_locale}))
        else:
            log.debug('Existing player locale has been retrieved from the DB.')

        # Gather the Pokemon!
        argset = (args, new_location_queue, control_flags, heartbeat,
                  db_updates_queue, wh_updates_queue)

        log.debug('Starting a %s search thread', args.scheduler)
        search_thread = Thread(target=search_overseer_thread,
                               name='search-overseer',
                               args=argset)
        search_thread.daemon = True
        search_thread.start()

    if args.no_server:
        # This loop allows ctrl-c interrupts to work since Flask won't be
        # holding the program open.
        while search_thread.is_alive():
            time.sleep(60)
    else:
        # Dynamic rarity.
        if args.rarity_update_frequency:
            t = Thread(target=dynamic_rarity_refresher, name='dynamic-rarity')
            t.daemon = True
            t.start()
            log.info('Dynamic rarity is enabled.')
        else:
            log.info('Dynamic rarity is disabled.')

        if args.cors:
            CORS(app)

        # No more stale JS.
        init_cache_busting(app)

        app.set_control_flags(control_flags)
        app.set_heartbeat_control(heartbeat)
        app.set_location_queue(new_location_queue)
        ssl_context = None
        if (args.ssl_certificate and args.ssl_privatekey
                and os.path.exists(args.ssl_certificate)
                and os.path.exists(args.ssl_privatekey)):
            ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
            ssl_context.load_cert_chain(args.ssl_certificate,
                                        args.ssl_privatekey)
            log.info('Web server in SSL mode.')
        if args.verbose:
            app.run(threaded=True,
                    use_reloader=False,
                    debug=True,
                    host=args.host,
                    port=args.port,
                    ssl_context=ssl_context)
        else:
            app.run(threaded=True,
                    use_reloader=False,
                    debug=False,
                    host=args.host,
                    port=args.port,
                    ssl_context=ssl_context)
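main() above wires database writes through db_updates_queue and a pool of daemon db_updater threads. A minimal sketch of that producer/consumer pattern (the worker body and the apply_update call are hypothetical; only the queue-plus-daemon-thread wiring mirrors the example):

from queue import Queue
from threading import Thread

def db_updater(q):
    while True:
        model, data = q.get()     # blocks until a producer puts an update
        model.apply_update(data)  # hypothetical persistence call
        q.task_done()

db_updates_queue = Queue()
for i in range(2):
    t = Thread(target=db_updater, name='db-updater-{}'.format(i),
               args=(db_updates_queue,))
    t.daemon = True  # workers die with the main thread, as in main() above
    t.start()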
Example #50
0
class EnemyGame(Game):
    TIMER = 5

    def __init__(self, enemy_speed, label, dir_name=""):
        self.text_storage = TextStorage(dir_name)
        self.text_name, text = self.text_storage.get_random_string(label)
        super().__init__(text)
        self.enemy_input = UserInput(text)
        self.enemy_speed = enemy_speed
        self.inp_thread = Thread(target=self.input_thread)
        self.en_thread = Thread(target=self.enemy_thread)
        self.upd_thread = Thread(target=self.print_thread)
        self.threads_active = False
        self.enemy_finish = None

    def start_game(self):
        self.set_timer(self.current_state
                       (self.current_enemy_state(self.enemy_input),
                        self.current_state_string(self.user_input)))
        self._start_time = timeit.default_timer()
        self.inp_thread.start()
        self.en_thread.start()
        self.threads_active = True
        self.inp_thread.join()
        self.en_thread.join()
        if self.is_exit:
            SimpleGame.clear()
            sys.exit()
        finish_time = timeit.default_timer()
        res_time = finish_time - self._start_time
        if self.is_finished:
            self.print_results(self.current_state(None,
                                                  self.result_string(
                                                      self.user_input,
                                                      res_time,
                                                      finish_time)))
            self.get_label(self.text_storage, self.text_name)
        self._user.update_stat(self.user_input, res_time,
                               self.calc_current_speed())

    def input_thread(self):
        while not self.user_input.is_done():
            symbol = self.get_user_input()
            if symbol == '\x03':
                self.is_exit = True
                break
            if symbol is None:
                self.is_finished = False
                break
            self.user_input.update(symbol)
            self.print_results(
                self.current_state(self.current_enemy_state(self.enemy_input),
                                   self.current_state_string(self.user_input)))

    def print_thread(self):
        while not self.user_input.is_done():
            if not self.inp_thread.is_alive():
                break
            self.print_results(
                self.current_state(self.current_enemy_state(self.enemy_input),
                                   self.current_state_string(self.user_input)))
            time.sleep(0.5)

    def enemy_thread(self):
        while not self.enemy_input.is_done():
            if not self.inp_thread.is_alive():
                break
            symbol = self.enemy_input.get_blank_text()[0]
            self.enemy_input.update(symbol)
            spd = (self.enemy_speed // 60) or 1
            time.sleep(1 / spd)
            self.print_results(
                self.current_state(self.current_enemy_state(self.enemy_input),
                                   self.current_state_string(self.user_input)))
        self.enemy_finish = timeit.default_timer()
        self.upd_thread.start()
        self.upd_thread.join()

    def result_string(self, user_input, res_time, finish_time):
        s = f'{colored(user_input.text, "green")}\r\n' \
            f'{self._strings["acc"]}: ' \
            f'{user_input.accurate:.2%}\r\n' \
            f'{self._strings["time"]}: {res_time:.2f} ' \
            f'{self._strings["sec"]}\r\n' \
            f'{self._strings["avg_speed"]}:' \
            f' {self.calc_current_speed()} ' \
            f'{self._strings["speed"]}\r\n' \
            f'{self._strings["err"]}: ' \
            f'{user_input.error_count}\r\n' \
            f'{self._strings["delta"]}: ' \
            f'{finish_time - self.enemy_finish:.2f} ' \
            f'{self._strings["sec"]}'
        return s

    def current_state_string(self, user_input):
        s = f'{colored(user_input.correct_text(), "green")}' \
            f'{colored(user_input.get_incorrect_symbol(), "red")}' \
            f'{user_input.get_blank_text()}\r\n' \
            f'{colored(user_input.correct_input, "green")}' \
            f'{colored(user_input.incorrect_text, "red")}\r\n' \
            f'{self._strings["cur_speed"]}: ' \
            f'{self.calc_current_speed()} ' \
            f'{self._strings["speed"]}\r\n' \
            f'{self._strings["err"]}: ' \
            f'{self.user_input.error_count}\r'
        return s

    def current_state(self, enemy_res, user_res):
        res = user_res
        cols = os.get_terminal_size().columns
        if enemy_res:
            res = f"{enemy_res}\r\n{'-' * cols}\r\n{user_res}"
        return res

    def current_enemy_state(self, user_input):
        return '{}{}{}\r\n'.format(
            colored(user_input.correct_text(), 'magenta'),
            colored(user_input.get_incorrect_symbol(), 'red'),
            user_input.get_blank_text(),
        )
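enemy_thread above converts enemy_speed (assumed to be characters per minute) into a per-character sleep, with the `(... // 60) or 1` guard avoiding a zero divisor for speeds under 60. A minimal sketch of that pacing in isolation (type_like_enemy is hypothetical):

import time

def type_like_enemy(text, enemy_speed=180):
    per_second = (enemy_speed // 60) or 1  # chars per second, at least 1
    for ch in text:
        time.sleep(1 / per_second)
        print(ch, end='', flush=True)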
Example #51
0
#2 The main thread only counts as finished once all other non-daemon threads have finished (daemon threads are reclaimed at that point). The end of the main thread means the end of the process, whose resources are then reclaimed wholesale, so the process must ensure that every non-daemon thread has run to completion before it can end.

'''

from threading import Thread
import time


def sayhi(name):
    print("start subthread")
    time.sleep(2)
    print('%s say hello' % name)


if __name__ == '__main__':
    t = Thread(target=sayhi, args=('egon', ))
    t2 = Thread(target=sayhi, args=('RYAN', ))
    t.daemon = True  # must be set before t.start()
    t.start()
    t2.start()
    print(t.is_alive())
    print('main thread')
'''
My own understanding of daemon processes and daemon threads:

Daemon process: a daemon process is ended as soon as the main process's code finishes, but the main process itself still waits for the other non-daemon processes to end before it ends.
Daemon thread: the main thread waits for the other child threads to finish before it ends, so a daemon thread only ends once the main thread does, i.e. after the main process and all non-daemon threads have finished.

'''
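A short sketch of the contrast described above: a daemon *process* is terminated as soon as the parent's code finishes, which is earlier than a daemon *thread* would be reaped (a daemon thread lives until the main thread and all non-daemon threads are done):

from multiprocessing import Process
import time

def work():
    time.sleep(2)
    print('never printed: the daemon process dies with the parent')

if __name__ == '__main__':
    p = Process(target=work, daemon=True)
    p.start()
    print('parent code done')  # the daemon process is terminated here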
Example #52
0
 def _end_connection(self, connection_thread: threading.Thread):
     aux_write_sock = self._active_connections[connection_thread]
     aux_write_sock.close()
     if connection_thread.is_alive():
         connection_thread.join()
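_end_connection unblocks the connection thread by closing the write socket it reads from before joining it. A minimal sketch of that shutdown idiom using a local socket pair:

import socket
from threading import Thread

r, w = socket.socketpair()
t = Thread(target=lambda: r.recv(1024))  # blocks until the peer closes
t.start()
w.close()  # recv() returns b'' once the write side is gone
t.join()   # so this join cannot hang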
Example #53
0
def update_pkg(pkg, url):
    """updating a package in frozen application folder
    expect to download and extract a wheel file e.g. "youtube_dlc-2020.10.24.post6-py2.py3-none-any.whl", which in fact
    is a zip file

    Args:
        pkg (str): package name
        url (str): download url (for a wheel file)
    """

    current_directory = config.current_directory
    log(f'start updating {pkg}')

    # check if the application is frozen, e.g. runs from a windows cx_freeze executable
    # if run from source, we will update system installed package and exit
    if not config.FROZEN:
        cmd = f'"{sys.executable}" -m pip install {pkg} --upgrade'
        success, output = run_command(cmd)
        if success:
            log(f'successfully updated {pkg}, please restart application',
                showpopup=True)
        return

    # paths
    temp_folder = os.path.join(current_directory, f'temp_{pkg}')
    extract_folder = os.path.join(temp_folder, 'extracted')
    z_fn = f'{pkg}.zip'
    z_fp = os.path.join(temp_folder, z_fn)

    target_pkg_folder = os.path.join(current_directory, f'lib/{pkg}')
    bkup_folder = os.path.join(current_directory, f'lib/{pkg}_bkup')
    new_pkg_folder = None

    # make temp folder
    log('making temp folder in:', current_directory)
    if not os.path.isdir(temp_folder):
        os.mkdir(temp_folder)

    def bkup():
        # backup current package folder
        log(f'delete previous backup and backup current {pkg}:')
        delete_folder(bkup_folder)
        shutil.copytree(target_pkg_folder, bkup_folder)

    def tar_extract():
        with tarfile.open(z_fp, 'r') as tar:
            tar.extractall(path=extract_folder)

    def zip_extract():
        with zipfile.ZipFile(z_fp, 'r') as z:
            z.extractall(path=extract_folder)

    extract = zip_extract

    def compile_file(q):
        while q.qsize():
            file = q.get()

            if file.endswith('.py'):
                try:
                    py_compile.compile(file, cfile=file + 'c')
                    os.remove(file)
                except Exception as e:
                    log('compile_file()> error', e)
            else:
                print(file, 'not .py file')

    def compile_all():
        q = queue.Queue()

        # get files list and add it to queue
        for item in os.listdir(new_pkg_folder):
            item = os.path.join(new_pkg_folder, item)

            if os.path.isfile(item):
                file = item
                # compile_file(file)
                q.put(file)
            else:
                folder = item
                for file in os.listdir(folder):
                    file = os.path.join(folder, file)
                    # compile_file(file)
                    q.put(file)

        tot_files_count = q.qsize()
        last_percent_value = 0

        # create 10 worker threads
        threads = []
        for _ in range(10):
            t = Thread(target=compile_file, args=(q, ), daemon=True)
            threads.append(t)
            t.start()

        # watch threads until finished
        while True:
            live_threads = [t for t in threads if t.is_alive()]
            processed_files_count = tot_files_count - q.qsize()
            percent = processed_files_count * 100 // tot_files_count
            if percent != last_percent_value:
                last_percent_value = percent
                log('#', start='', end='' if percent < 100 else '\n')

            if not live_threads and not q.qsize():
                break

            time.sleep(0.1)
        log('Finished compiling to .pyc files')

    def overwrite_pkg():
        delete_folder(target_pkg_folder)
        shutil.move(new_pkg_folder, target_pkg_folder)
        log('new package copied to:', target_pkg_folder)

    # start processing -------------------------------------------------------
    log(f'start updating {pkg} please wait ...')

    try:
        # use a thread to show some progress while backup
        t = Thread(target=bkup)
        t.start()
        while t.is_alive():
            log('#', start='', end='')
            time.sleep(0.3)

        log('\n', start='')

        # download from pypi
        log(f'step 1 of 4: downloading {pkg} raw files')
        buffer = download(url, file_name=z_fp)
        if not buffer:
            log(f'failed to download {pkg}, abort update')
            return

        # extract tar file
        log(f'step 2 of 4: extracting {z_fn}')

        # use a thread to show some progress while unzipping
        t = Thread(target=extract)
        t.start()
        while t.is_alive():
            log('#', start='', end='')
            time.sleep(0.3)

        log('\n', start='')
        log(f'{z_fn} extracted to: {temp_folder}')

        # define new pkg folder
        new_pkg_folder = os.path.join(extract_folder, pkg)

        # compile files from py to pyc
        log('step 3 of 4: compiling files, please wait')
        compile_all()

        # delete old package and replace it with new one
        log(f'step 4 of 4: overwrite old {pkg} files')
        overwrite_pkg()

        # clean old files
        log('delete temp folder')
        delete_folder(temp_folder)
        log(f'{pkg} ..... done updating \nplease restart Application now',
            showpopup=True)
    except Exception as e:
        log('update_pkg()> error', e)
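The "run a step on a worker thread while printing '#' progress marks" pattern appears twice in update_pkg (backup and extract), and compile_all uses a variant of it. A small helper, hypothetical and not part of the original, makes the repetition explicit:

import time
from threading import Thread

def run_with_progress(func, interval=0.3):
    t = Thread(target=func)
    t.start()
    while t.is_alive():  # poll the worker, echoing progress marks
        print('#', end='', flush=True)
        time.sleep(interval)
    print()

Usage would then be run_with_progress(bkup) and run_with_progress(extract) in place of the two inline loops.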
Example #54
0
class SubprocessInputStreamer:
    _end_of_stream = object()

    def __init__(self, popen_args, popen_kwargs=None, timeout=10):
        self.popen_args = popen_args
        self.popen_kwargs = popen_kwargs
        self.running = False
        self.timeout = timeout

    def start(self):
        assert not self.running

        self.stdout = None
        self.stderr = None
        self.error = None
        self.exit_code = None

        self._process = subprocess.Popen(self.popen_args,
                                         **(self.popen_kwargs or {}))

        self._queue = Queue()
        self.running = True

        if self._process.stdin is not None:
            self._stdin_thread = Thread(target=self._thread_write)
            self._stdin_thread.daemon = True
            self._stdin_thread.start()

        if self._process.stdout is not None:
            self._stdout_thread = Thread(target=self._thread_read,
                                         args=(self._process.stdout, 'stdout'))
            self._stdout_thread.daemon = True
            self._stdout_thread.start()

        if self._process.stderr is not None:
            self._stderr_thread = Thread(target=self._thread_read,
                                         args=(self._process.stderr, 'stderr'))
            self._stderr_thread.daemon = True
            self._stderr_thread.start()

    def stop(self):
        if not self.running:
            return

        end_time = time() + self.timeout

        if self._process.stdin is not None:
            self._queue.put(self._end_of_stream)
            self._queue.join()
            self._process.stdin.flush()
            self._process.stdin.close()

        self.running = False

        try:
            if self._process.stdin:
                self._stdin_thread.join(timeout=self._remaining_time(end_time))
                if self._stdin_thread.is_alive():
                    raise subprocess.TimeoutExpired(self._process.args,
                                                    self.timeout)
            if self._process.stdout:
                self._stdout_thread.join(
                    timeout=self._remaining_time(end_time))
                if self._stdout_thread.is_alive():
                    raise subprocess.TimeoutExpired(self._process.args,
                                                    self.timeout)
            if self._process.stderr:
                self._stderr_thread.join(
                    timeout=self._remaining_time(end_time))
                if self._stderr_thread.is_alive():
                    raise subprocess.TimeoutExpired(self._process.args,
                                                    self.timeout)

            self.exit_code = self._process.wait(
                timeout=self._remaining_time(end_time))
        except Exception as e:
            self.error = e
            self._process.kill()
            self._process.wait()
        finally:
            self._process = None
            self._queue = None

    @classmethod
    def _remaining_time(cls, end_time):
        return end_time - time()

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()

    def write(self, b: Union[bytes, memoryview]):
        assert self.running, 'Must start before writing'
        if isinstance(b, memoryview):
            b = bytes(b)
        self._queue.put(b)

    def _thread_write(self):
        while self.running:
            b = self._queue.get()
            if b is self._end_of_stream:
                self._queue.task_done()
                return

            try:
                self._process.stdin.write(b)
            except BrokenPipeError:
                # Failed to write to subprocess stdin (broken pipe); stdin
                # was likely closed.
                pass
            except Exception:
                # Failed to write to subprocess stdin. Handle appropriately,
                # but make sure to consume the queue.
                pass
            finally:
                self._queue.task_done()

    def _thread_read(self, fp, out_name):
        setattr(self, out_name, fp.read())
        fp.close()
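A hypothetical usage of SubprocessInputStreamer as a context manager: stream bytes into `cat` and collect its stdout. The popen_kwargs pipes are assumptions of this sketch; stop() runs on exit and joins the reader threads before stdout is populated:

import subprocess

streamer = SubprocessInputStreamer(
    ['cat'],
    popen_kwargs={'stdin': subprocess.PIPE, 'stdout': subprocess.PIPE})
with streamer:
    streamer.write(b'hello ')
    streamer.write(b'world\n')
print(streamer.stdout)  # b'hello world\n' once the readers have joined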
Example #55
0
class ImagePanel(BasePanel):
    """
    MatPlotlib Image as a wx.Panel, suitable for embedding
    in any wx.Frame.   This provides a right-click popup
    menu for configuration, zoom by dragging, saving an image
    figure, and Ctrl-C for copy-image-to-clipboard, customizations
    of colormap, interpolation, and intensity scaling

    For more features, see PlotFrame, which embeds a PlotPanel
    and also provides a Menu, StatusBar, and Printing support.
    """
    def __init__(self,
                 parent,
                 messenger=None,
                 data_callback=None,
                 cursor_callback=None,
                 lasso_callback=None,
                 redraw_callback=None,
                 zoom_callback=None,
                 contour_callback=None,
                 size=(525, 450),
                 dpi=100,
                 output_title='Image',
                 **kws):

        matplotlib.rc('lines', linewidth=2)
        BasePanel.__init__(self,
                           parent,
                           output_title=output_title,
                           messenger=messenger,
                           zoom_callback=zoom_callback,
                           **kws)

        self.conf = ImageConfig()
        self.conf.title = output_title
        self.cursor_mode = 'zoom'
        self.data_callback = data_callback
        self.cursor_callback = cursor_callback
        self.lasso_callback = lasso_callback
        self.contour_callback = contour_callback
        self.redraw_callback = redraw_callback
        self.slice_plotframe = None
        self.win_config = None
        self.size = size
        self.dpi = dpi
        self.user_limits = {}
        self.scalebar_rect = self.scalebar_text = None
        self.BuildPanel()

    @property
    def xdata(self):
        return self.conf.xdata

    @xdata.setter
    def xdata(self, value):
        self.conf.xdata = value

    @property
    def ydata(self):
        return self.conf.ydata

    @ydata.setter
    def ydata(self, value):
        self.conf.ydata = value

    def display(self,
                data,
                x=None,
                y=None,
                xlabel=None,
                ylabel=None,
                style=None,
                nlevels=None,
                levels=None,
                contour_labels=None,
                store_data=True,
                col=0,
                unzoom=True,
                show_axis=False,
                auto_contrast=False,
                contrast_level=0,
                colormap=None,
                **kws):
        """
        generic display, using imshow (default) or contour
        """
        if style is not None:
            self.conf.style = style
        self.axes.cla()
        conf = self.conf
        conf.log_scale = False
        conf.show_axis = show_axis
        conf.highlight_areas = []
        if 1 in data.shape:
            data = data.squeeze()
        self.data_range = [0, data.shape[1], 0, data.shape[0]]
        if contrast_level not in (0, None):
            conf.contrast_level = contrast_level
        if auto_contrast:
            conf.contrast_level = 1
        if x is not None:
            conf.xdata = np.array(x)
            if conf.xdata.shape[0] != data.shape[1]:
                conf.xdata = None
        if y is not None:
            conf.ydata = np.array(y)
            if conf.ydata.shape[0] != data.shape[0]:
                conf.ydata = None

        if xlabel is not None:
            conf.xlab = xlabel
        if ylabel is not None:
            conf.ylab = ylabel
        if store_data:
            conf.data = data

        if self.conf.style == 'contour':
            if levels is None:
                levels = self.conf.ncontour_levels
            else:
                self.conf.ncontour_levels = levels
            if nlevels is None:
                nlevels = self.conf.ncontour_levels = 9
            nlevels = max(2, nlevels)

            if conf.contrast_level is not None:
                contrast = [conf.contrast_level, 100.0 - conf.contrast_level]
                imin, imax = np.percentile(conf.data, contrast)
                data = np.clip(conf.data, imin, imax)

            clevels = np.linspace(data.min(), data.max(), nlevels + 1)
            self.conf.contour_levels = clevels
            self.conf.image = self.axes.contourf(data,
                                                 cmap=self.conf.cmap[col],
                                                 levels=clevels)

            self.conf.contour = self.axes.contour(data,
                                                  cmap=self.conf.cmap[col],
                                                  levels=clevels)
            cmap_name = self.conf.cmap[col].name
            xname = 'gray'
            try:
                if cmap_name == 'gray_r':
                    xname = 'Reds_r'
                elif cmap_name == 'gray':
                    xname = 'Reds'
                elif cmap_name.endswith('_r'):
                    xname = 'gray_r'
            except:
                pass
            self.conf.contour.set_cmap(getattr(cmap, xname))

            if contour_labels is None:
                contour_labels = self.conf.contour_labels
            if contour_labels:
                nlog = np.log10(abs(clevels[1] - clevels[0]))
                fmt = "%.4f"
                if nlog < -2:
                    fmt = "%%.%df" % (1 - nlog)
                elif nlog > 2:
                    fmt = "%.1f"
                self.axes.clabel(self.conf.contour,
                                 fontsize=10,
                                 inline=1,
                                 fmt=fmt)
            if hasattr(self.contour_callback, '__call__'):
                self.contour_callback(levels=clevels)
        else:
            if data.max() == data.min():
                img = data
            else:
                img = (data - data.min()) / (1.0 * data.max() - data.min())
            if colormap is not None:
                self.conf.set_colormap(colormap, icol=col)
            self.conf.image = self.axes.imshow(img,
                                               cmap=self.conf.cmap[col],
                                               interpolation=self.conf.interp)

        self.autoset_margins()

        if unzoom:
            self.unzoom_all()

        if hasattr(self.data_callback, '__call__'):
            self.data_callback(data, x=x, y=y, **kws)

        self.conf.indices = None
        self.indices_thread = Thread(target=self.calc_indices,
                                     args=(data.shape, ))
        self.indices_thread.start()

    def update_image(self, data):
        """
        update image on panel, as quickly as possible
        """
        if 1 in data.shape:
            data = data.squeeze()
        if self.conf.contrast_level is not None:
            clevels = [
                self.conf.contrast_level, 100.0 - self.conf.contrast_level
            ]
            imin, imax = np.percentile(data, clevels)
            data = np.clip((data - imin) / (imax - imin + 1.e-8), 0, 1)
        self.axes.images[0].set_data(data)
        self.canvas.draw()

    def autoset_margins(self):
        """auto-set margins  left, bottom, right, top
        according to the specified margins (in pixels)
        and axes extent (taking into account labels,
        title, axis)
        """
        if self.conf.show_axis:
            self.axes.set_axis_on()
            if self.conf.show_grid:
                self.axes.grid(True,
                               alpha=self.conf.grid_alpha,
                               color=self.conf.grid_color)
            else:
                self.axes.grid(False)
            self.conf.set_formatters()

            l, t, r, b = 0.08, 0.96, 0.96, 0.08
            if self.conf.xlab is not None:
                self.axes.set_xlabel(self.conf.xlab)
                b, t = 0.11, 0.96
            if self.conf.ylab is not None:
                self.axes.set_ylabel(self.conf.ylab)
                l, r = 0.11, 0.96
        else:
            self.axes.set_axis_off()
            l, t, r, b = 0.01, 0.99, 0.99, 0.01
        self.gridspec.update(left=l, top=t, right=r, bottom=b)

        for ax in self.fig.get_axes():
            ax.update_params()
            ax.set_position(ax.figbox)

    def add_highlight_area(self, mask, label=None, col=0):
        """add a highlighted area -- outline an arbitrarily shape --
        as if drawn from a Lasso event.

        This takes a mask, which should be a boolean array of the
        same shape as the image.
        """
        patch = mask * np.ones(mask.shape) * 0.9
        cmap = self.conf.cmap[col]
        area = self.axes.contour(patch, cmap=cmap, levels=[0, 1])
        self.conf.highlight_areas.append(area)
        col = None
        if hasattr(cmap, '_lut'):
            rgb = [int(i * 240) ^ 255 for i in cmap._lut[0][:3]]
            col = '#%02x%02x%02x' % (rgb[0], rgb[1], rgb[2])

        if label is not None:

            def fmt(*args, **kws):
                return label

            self.axes.clabel(area,
                             fontsize=9,
                             fmt=fmt,
                             colors=col,
                             rightside_up=True)

        if col is not None:
            for l in area.collections:
                l.set_color(col)

        self.canvas.draw()

    def set_viewlimits(self, axes=None):
        """ update xy limits of a plot"""
        if axes is None:
            axes = self.axes

        xmin, xmax, ymin, ymax = self.data_range
        if len(self.conf.zoom_lims) > 1:
            zlims = self.conf.zoom_lims[-1]
            if axes in zlims:
                xmin, xmax, ymin, ymax = zlims[axes]

        xmin = max(self.data_range[0], xmin)
        xmax = min(self.data_range[1], xmax)
        ymin = max(self.data_range[2], ymin)
        ymax = min(self.data_range[3], ymax)
        if (xmax < self.data_range[0] or xmin > self.data_range[1]
                or ymax < self.data_range[2] or ymin > self.data_range[3]):
            self.conf.zoom_lims.pop()
            return

        if abs(xmax - xmin) < 2:
            xmin = int(0.5 * (xmax + xmin) - 1)
            xmax = xmin + 2

        if abs(ymax - ymin) < 2:
            ymin = int(0.5 * (ymax + ymin) - 1)
            ymax = ymin + 2

        self.axes.set_xlim((xmin, xmax), emit=True)
        self.axes.set_ylim((ymin, ymax), emit=True)
        self.axes.update_datalim(((xmin, ymin), (xmax, ymax)))

        self.conf.datalimits = [xmin, xmax, ymin, ymax]
        self.conf.reset_formats()
        self.redraw()

    def clear(self):
        """ clear plot """
        self.axes.cla()
        self.conf.title = ''

    ####
    ## create GUI
    ####
    def BuildPanel(self):
        """ builds basic GUI panel and popup menu"""
        figsize = (1.0 * self.size[0] / self.dpi,
                   1.0 * self.size[1] / self.dpi)
        self.fig = Figure(figsize, dpi=self.dpi)
        self.gridspec = GridSpec(1, 1)
        self.axes = self.fig.add_subplot(self.gridspec[0], facecolor='#FFFFFD')
        self.canvas = FigureCanvasWxAgg(self, -1, self.fig)
        self.conf.axes = self.axes
        self.conf.fig = self.fig
        self.conf.canvas = self.canvas

        # self.canvas.SetCursor(wx.StockCursor(wx.CURSOR_ARROW))
        # This way of adding to sizer allows resizing
        sizer = wx.BoxSizer(wx.HORIZONTAL)
        sizer.Add(self.canvas, 1, wx.ALL | wx.GROW)
        self.SetSizer(sizer)
        self.Fit()
        self.addCanvasEvents()

    def BuildPopup(self):
        # build pop-up menu for right-click display
        self.popup_menu = popup = wx.Menu()
        MenuItem(self, popup, 'Zoom out', '', self.unzoom)
        MenuItem(self, popup, 'Zoom all the way out', '', self.unzoom_all)

        self.popup_menu.AppendSeparator()

        MenuItem(self, popup, 'Rotate 90deg  (CW)', '', self.rotate90)
        MenuItem(self, popup, 'Save Image', '', self.save_figure)

    def rotate90(self, event=None, display=True):
        "rotate 90 degrees, CW"
        self.conf.rotate90()
        if display:
            conf = self.conf
            self.display(conf.data,
                         x=conf.xdata,
                         y=conf.ydata,
                         xlabel=conf.xlab,
                         ylabel=conf.ylab,
                         show_axis=conf.show_axis,
                         levels=conf.ncontour_levels)

    def flip_horiz(self):
        self.conf.flip_horiz()

    def flip_vert(self):
        self.conf.flip_vert()

    def restore_flips_rotations(self):
        "restore flips and rotations"
        conf = self.conf
        if conf.flip_lr:
            self.flip_horiz()
        if conf.flip_ud:
            self.flip_vert()
        if conf.rot_level != 0:
            for i in range(4 - conf.rot_level):
                self.rotate90(display=False)
            self.display(conf.data,
                         x=conf.xdata,
                         y=conf.ydata,
                         xlabel=conf.xlab,
                         ylabel=conf.ylab,
                         show_axis=conf.show_axis)

    def toggle_curmode(self, event=None):
        "toggle cursor mode"
        if self.cursor_mode == 'zoom':
            self.cursor_mode = 'lasso'
        else:
            self.cursor_mode = 'zoom'

    ####
    ## GUI events, overriding BasePanel components
    ####
    def calc_indices(self, shape):
        """calculates and stores the set of indices
        ix=[0, nx-1], iy=[0, ny-1] for data of shape (nx, ny)"""
        if len(shape) == 2:
            ny, nx = shape
        elif len(shape) == 3:
            ny, nx, nchan = shape

        inds = []
        for iy in range(ny):
            inds.extend([(ix, iy) for ix in range(nx)])
        self.conf.indices = np.array(inds)

    def lassoHandler(self, vertices):
        if self.conf.indices is None or self.indices_thread.is_alive():
            self.indices_thread.join()
        ind = self.conf.indices
        mask = inside_poly(vertices, ind)
        mask.shape = (self.conf.data.shape[0], self.conf.data.shape[1])
        self.lasso = None
        self.canvas.draw()
        if hasattr(self.lasso_callback, '__call__'):
            self.lasso_callback(mask=mask)

    def unzoom(self, event=None, set_bounds=True):
        """ zoom out 1 level, or to full data range """
        lims = None
        if len(self.conf.zoom_lims) > 1:
            lims = self.conf.zoom_lims.pop()
        ax = self.axes
        if lims is None:  # auto scale
            self.conf.zoom_lims = [None]
            xmin, xmax, ymin, ymax = self.data_range
            lims = {self.axes: [xmin, xmax, ymin, ymax]}
        self.set_viewlimits()
        self.canvas.draw()

    def zoom_leftup(self, event=None):
        """leftup event handler for zoom mode  in images"""
        if self.zoom_ini is None:
            return

        ini_x, ini_y, ini_xd, ini_yd = self.zoom_ini
        try:
            dx = abs(ini_x - event.x)
            dy = abs(ini_y - event.y)
        except:
            dx, dy = 0, 0
        t0 = time.time()
        self.rbbox = None
        self.zoom_ini = None
        if (dx > 3) and (dy > 3) and (t0 - self.mouse_uptime) > 0.1:
            self.mouse_uptime = t0
            zlims, tlims = {}, {}
            ax = self.axes
            xmin, xmax = ax.get_xlim()
            ymin, ymax = ax.get_ylim()

            zlims[ax] = [xmin, xmax, ymin, ymax]

            if len(self.conf.zoom_lims) == 0:
                self.conf.zoom_lims.append(zlims)

            ax_inv = ax.transData.inverted
            try:
                x1, y1 = ax_inv().transform((event.x, event.y))
            except:
                x1, y1 = self.x_lastmove, self.y_lastmove
            try:
                x0, y0 = ax_inv().transform((ini_x, ini_y))
            except:
                x0, y0 = ini_xd, ini_yd

            tlims[ax] = [
                int(round(min(x0, x1))),
                int(round(max(x0, x1))),
                int(round(min(y0, y1))),
                int(round(max(y0, y1)))
            ]
            self.conf.zoom_lims.append(tlims)
            # now apply limits:
            self.set_viewlimits()
            if callable(self.zoom_callback):
                self.zoom_callback(wid=self.GetId(), limits=tlims[ax])

    def unzoom_all(self, event=None):
        """ zoom out full data range """
        self.conf.zoom_lims = [None]
        self.unzoom(event)

    def redraw(self, col=0):
        """redraw image, applying
        - log scaling,
        - max/min values from sliders or explicit intensity ranges
        - color map
        - interpolation
        """
        conf = self.conf
        img = conf.data
        if img is None: return
        if len(img.shape) == 2:
            col = 0
        if self.conf.style == 'image':
            if conf.log_scale:
                img = np.log10(1 + 9.0 * img)

        # apply intensity scale for current limited (zoomed) image
        if len(img.shape) == 2:
            # apply clipped color scale, as from sliders

            imin = float(conf.int_lo[col])
            imax = float(conf.int_hi[col])
            if conf.log_scale:
                imin = np.log10(1 + 9.0 * imin)
                imax = np.log10(1 + 9.0 * imax)

            (xmin, xmax, ymin, ymax) = self.conf.datalimits
            if xmin is None: xmin = 0
            if xmax is None: xmax = img.shape[1]
            if ymin is None: ymin = 0
            if ymax is None: ymax = img.shape[0]

            img = (img - imin) / (imax - imin + 1.e-8)
            mlo = conf.cmap_lo[0] / (1.0 * conf.cmap_range)
            mhi = conf.cmap_hi[0] / (1.0 * conf.cmap_range)
            if self.conf.style == 'image':
                conf.image.set_data(
                    np.clip((img - mlo) / (mhi - mlo + 1.e-8), 0, 1))
                conf.image.set_interpolation(conf.interp)
        else:
            r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]

            rmin = float(conf.int_lo[0])
            rmax = float(conf.int_hi[0])
            gmin = float(conf.int_lo[1])
            gmax = float(conf.int_hi[1])
            bmin = float(conf.int_lo[2])
            bmax = float(conf.int_hi[2])
            if conf.log_scale:
                rmin = np.log10(1 + 9.0 * rmin)
                rmax = np.log10(1 + 9.0 * rmax)
                gmin = np.log10(1 + 9.0 * gmin)
                gmax = np.log10(1 + 9.0 * gmax)
                bmin = np.log10(1 + 9.0 * bmin)
                bmax = np.log10(1 + 9.0 * bmax)

            rlo = conf.cmap_lo[0] / (1.0 * conf.cmap_range)
            rhi = conf.cmap_hi[0] / (1.0 * conf.cmap_range)
            glo = conf.cmap_lo[1] / (1.0 * conf.cmap_range)
            ghi = conf.cmap_hi[1] / (1.0 * conf.cmap_range)
            blo = conf.cmap_lo[2] / (1.0 * conf.cmap_range)
            bhi = conf.cmap_hi[2] / (1.0 * conf.cmap_range)
            r = (r - rmin) / (rmax - rmin + 1.e-8)
            g = (g - gmin) / (gmax - gmin + 1.e-8)
            b = (b - bmin) / (bmax - bmin + 1.e-8)

            inew = img * 1.0
            inew[:, :, 0] = np.clip((r - rlo) / (rhi - rlo + 1.e-8), 0, 1)
            inew[:, :, 1] = np.clip((g - glo) / (ghi - glo + 1.e-8), 0, 1)
            inew[:, :, 2] = np.clip((b - blo) / (bhi - blo + 1.e-8), 0, 1)

            whitebg = conf.tricolor_bg.startswith('wh')

            if whitebg:
                inew = conf.tricolor_white_bg(inew)

            if self.conf.style == 'image':
                conf.image.set_data(inew)
                conf.image.set_interpolation(conf.interp)

        try:
            self.scalebar_rect.remove()
        except:
            pass
        try:
            self.scalebar_text.remove()
        except:
            pass

        if conf.scalebar_show:
            ystep, xstep = conf.scalebar_pixelsize
            if xstep is None or ystep is None:
                ystep, xstep = 1, 1
                if conf.xdata is not None:
                    xstep = abs(np.diff(conf.xdata).mean())
                if conf.ydata is not None:
                    ystep = abs(np.diff(conf.ydata).mean())
                conf.scalebar_pixelsize = ystep, xstep
            y, x = conf.scalebar_pos
            y, x = int(y), int(x)
            h, w = conf.scalebar_size
            h, w = int(h), int(w / xstep)
            col = conf.scalebar_color

            self.scalebar_rect = Rectangle((x, y),
                                           w,
                                           h,
                                           linewidth=1,
                                           edgecolor=col,
                                           facecolor=col)
            self.axes.add_patch(self.scalebar_rect)
            if conf.scalebar_showlabel:
                x = int(x + w / 4)
                y = y - 3 * h
                self.scalebar_text = self.axes.text(x,
                                                    y,
                                                    conf.scalebar_label,
                                                    color=col)
        self.canvas.draw()
        if callable(self.redraw_callback):
            self.redraw_callback(wid=self.GetId())

    def report_motion(self, event=None):
        if event.inaxes is None:
            return
        fmt = "X,Y= %g, %g"
        x, y = event.xdata, event.ydata
        if len(self.fig.get_axes()) > 1:
            try:
                x, y = self.axes.transData.inverted().transform((x, y))
            except:
                pass
        if self.motion_sbar is None:
            try:
                self.motion_sbar = self.nstatusbar - 1
            except AttributeError:
                self.motion_sbar = 1
        self.write_message(fmt % (x, y), panel=self.motion_sbar)
        conf = self.conf
        if conf.slice_onmotion:
            ix, iy = int(round(x)), int(round(y))
            if (ix >= 0 and ix < conf.data.shape[1] and iy >= 0
                    and iy < conf.data.shape[0]):
                conf.slice_xy = ix, iy
                self.update_slices()

    def report_leftdown(self, event=None):
        if event is None:
            return
        if event.xdata is None or event.ydata is None:
            return

        ix, iy = int(round(event.xdata)), int(round(event.ydata))

        conf = self.conf
        if (ix >= 0 and ix < conf.data.shape[1] and iy >= 0
                and iy < conf.data.shape[0]):
            pos = ''
            if conf.xdata is not None:
                pos = ' %s=%.4g,' % (conf.xlab, conf.xdata[ix])
            if conf.ydata is not None:
                pos = '%s %s=%.4g,' % (pos, conf.ylab, conf.ydata[iy])
            dval = conf.data[iy, ix]
            if len(conf.data.shape) == 3:
                dval = "%.4g, %.4g, %.4g" % tuple(dval)
            else:
                dval = "%.4g" % dval
            msg = "Pixel [%i, %i], %s Intensity=%s " % (ix, iy, pos, dval)

            self.write_message(msg, panel=0)
            conf.slice_xy = ix, iy
            self.update_slices()
            if hasattr(self.cursor_callback, '__call__'):
                self.cursor_callback(x=event.xdata, y=event.ydata)

    def get_slice_plotframe(self):
        shown = False
        new_plotter = False
        if self.slice_plotframe is not None:
            try:
                self.slice_plotframe.Raise()
                shown = True
            except:
                pass
        if not shown:
            self.slice_plotframe = pf = PlotFrame(self)
            new_plotter = True
            try:
                xpos, ypos = self.parent.GetPosition()
                xsiz, ysiz = self.parent.GetSize()
                pf.SetPosition((xpos + xsiz + 10, ypos))
            except:
                pass

        return new_plotter, self.slice_plotframe

    def update_slices(self):
        if self.conf.slices in ('None', None, 0):
            return
        x, y = -1, -1
        try:
            x, y = [int(a) for a in self.conf.slice_xy]
        except:
            return
        if len(self.conf.data.shape) == 3:
            ymax, xmax, nc = self.conf.data.shape
        elif len(self.conf.data.shape) == 2:
            ymax, xmax = self.conf.data.shape
            nc = 0
        else:
            return
        if x < 0 or y < 0 or x > xmax or y > ymax:
            return

        wid = int(self.conf.slice_width)
        new_plotter, pf = self.get_slice_plotframe()

        popts = {'ylabel': 'Intensity', 'linewidth': 3}

        if self.conf.slices.lower() == 'x':
            y1 = int(y - wid / 2. + 1)
            y2 = int(y + wid / 2.) + 1
            if y1 < 0: y1 = 0
            if y2 > ymax: y2 = ymax
            _x = self.conf.xdata
            if _x is None:
                _x = np.arange(self.conf.data.shape[1])
            _y = self.conf.data[y1:y2].sum(axis=0)
            popts['xlabel'] = 'X'
            popts['title'] = 'X Slice: Y=%d:%d' % (y1, y2)
            if y2 == y1 + 1:
                popts['title'] = 'X Slice: Y=%d' % y1

        else:
            x1 = int(x - wid / 2.0 + 1)
            x2 = int(x + wid / 2.0) + 1
            if x1 < 0: x1 = 0
            if x2 > xmax: x2 = xmax
            _x = self.conf.ydata
            if _x is None:
                _x = np.arange(self.conf.data.shape[0])
            _y = self.conf.data[:, x1:x2].sum(axis=1)
            popts['xlabel'] = 'Y'
            popts['title'] = 'Y Slice: X=%d:%d' % (x1, x2)
            if x2 == x1 + 1:
                popts['title'] = 'Y Slice: X=%d' % x1

        if new_plotter:
            if len(_y.shape) == 2 and _y.shape[1] == 3:
                pf.plot(_x,
                        _y[:, 0],
                        color=RGB_COLORS[0],
                        delay_draw=True,
                        **popts)
                pf.oplot(_x,
                         _y[:, 1],
                         color=RGB_COLORS[1],
                         delay_draw=True,
                         **popts)
                pf.oplot(_x, _y[:, 2], color=RGB_COLORS[2], **popts)
            else:
                pf.plot(_x, _y, **popts)
        else:
            pf.panel.set_title(popts['title'], delay_draw=True)
            pf.panel.set_xlabel(popts['xlabel'], delay_draw=True)
            if len(_y.shape) == 2 and _y.shape[1] == 3:
                pf.update_line(0, _x, _y[:, 0], update_limits=True, draw=False)
                pf.update_line(1, _x, _y[:, 1], update_limits=True, draw=False)
                pf.update_line(2, _x, _y[:, 2], update_limits=True, draw=True)
            else:
                pf.update_line(0, _x, _y, update_limits=True, draw=True)

        pf.Show()
        self.SetFocus()
        try:
            self.parent.Raise()
        except:
            pass
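update_image and redraw above both apply the same percentile-based contrast stretch: clip the data between the low/high percentiles implied by contrast_level, then normalize to [0, 1] with a small epsilon to avoid dividing by zero. A minimal standalone sketch of that scaling:

import numpy as np

def stretch(data, contrast_level=1.0):
    lo, hi = np.percentile(data, [contrast_level, 100.0 - contrast_level])
    return np.clip((data - lo) / (hi - lo + 1.e-8), 0, 1)

img = stretch(np.random.rand(64, 64) * 1000.0)  # values now in [0, 1]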
Example #56
0
class BackgroundMonitor(object):
    # If we will need multiple monitoring contexts (i.e. subprocesses) this will become a dict
    _main_process = None
    _main_process_task_id = None
    _parent_pid = None
    _sub_process_started = None
    _instances = {}  # type: Dict[int, List[BackgroundMonitor]]

    def __init__(self, task, wait_period):
        self._event = TrEvent()
        self._done_ev = TrEvent()
        self._start_ev = TrEvent()
        self._task_pid = os.getpid()
        self._thread = None
        self._wait_timeout = wait_period
        self._subprocess = None if task.is_main_task() else False
        self._task_id = task.id
        self._task_obj_id = id(task.id)

    def start(self):
        if not self._thread:
            self._thread = True
        self._event.clear()
        self._done_ev.clear()
        if self._subprocess is False:
            # start the thread; we are in threading mode.
            self._start()
        else:
            # append to instances
            if self not in self._get_instances():
                self._get_instances().append(self)

    def wait(self, timeout=None):
        if not self._done_ev:
            return
        self._done_ev.wait(timeout=timeout)

    def _start(self):
        # if we already started do nothing
        if isinstance(self._thread, Thread):
            return
        self._thread = Thread(target=self._daemon)
        self._thread.daemon = True
        self._thread.start()

    def stop(self):
        if not self._thread:
            return

        if not self.is_subprocess() or self.is_subprocess_alive():
            self._event.set()

        if isinstance(self._thread, Thread):
            try:
                self._get_instances().remove(self)
            except ValueError:
                pass
            self._thread = None

    def daemon(self):
        while True:
            if self._event.wait(self._wait_timeout):
                break
            self._daemon_step()

    def _daemon(self):
        self._start_ev.set()
        self.daemon()
        self.post_execution()
        self._thread = None

    def post_execution(self):
        self._done_ev.set()

    def set_subprocess_mode(self):
        # called just before launching the daemon in a subprocess
        if not self._subprocess:
            self._subprocess = True
        if not isinstance(self._done_ev, SafeEvent):
            self._done_ev = SafeEvent()
        if not isinstance(self._start_ev, SafeEvent):
            self._start_ev = SafeEvent()
        if not isinstance(self._event, SafeEvent):
            self._event = SafeEvent()

    def _daemon_step(self):
        pass

    @classmethod
    def start_all(cls, task, wait_for_subprocess=True):
        # noinspection PyProtectedMember
        execute_in_subprocess = task._report_subprocess_enabled

        if not execute_in_subprocess:
            for d in BackgroundMonitor._instances.get(id(task.id), []):
                d._start()
        elif not BackgroundMonitor._main_process:
            cls._parent_pid = os.getpid()
            cls._sub_process_started = SafeEvent()
            cls._sub_process_started.clear()
            cls._main_process_task_id = task.id
            # setup
            for d in BackgroundMonitor._instances.get(id(task.id), []):
                d.set_subprocess_mode()
            # todo: solve for standalone spawn subprocess
            BackgroundMonitor._main_process = Process(
                target=cls._background_process_start,
                args=(id(task.id), cls._sub_process_started)
            )
            BackgroundMonitor._main_process.daemon = True
            # Hack: allow creating daemon subprocesses (even though Python doesn't like it)
            un_daemonize = False
            # noinspection PyBroadException
            try:
                from multiprocessing import current_process
                if current_process()._config.get('daemon'):  # noqa
                    un_daemonize = current_process()._config.get('daemon')  # noqa
                    current_process()._config['daemon'] = False  # noqa
            except BaseException:
                pass
            # try to start the background process; retry on failure, and raise if all retries fail
            for i in range(4):
                try:
                    BackgroundMonitor._main_process.start()
                    break
                except BaseException:
                    if i < 3:
                        sleep(1)
                        continue
                    raise
            if un_daemonize:
                # noinspection PyBroadException
                try:
                    from multiprocessing import current_process
                    current_process()._config['daemon'] = un_daemonize  # noqa
                except BaseException:
                    pass
            # wait until subprocess is up
            if wait_for_subprocess:
                cls._sub_process_started.wait()

    @classmethod
    def _background_process_start(cls, task_obj_id, event_start=None):
        # type: (int, Optional[SafeEvent]) -> None
        is_debugger_running = bool(getattr(sys, 'gettrace', None) and sys.gettrace())
        # restore the original signal handler; this prevents deadlocks
        # Do not change the exception handling: we need to catch BaseException as well
        # noinspection PyBroadException
        try:
            from ... import Task
            # make sure we do not call Task.current_task(); it would create a Task object for us in the subprocess!
            # noinspection PyProtectedMember
            if Task._has_current_task_obj():
                # noinspection PyProtectedMember
                Task.current_task()._remove_at_exit_callbacks()
        except:  # noqa
            pass

        # if a debugger is running, wait for it to attach to the subprocess
        if is_debugger_running:
            sleep(3)

        instances = BackgroundMonitor._instances.get(task_obj_id, [])
        # launch all the threads
        for d in instances:
            d._start()

        if cls._sub_process_started:
            cls._sub_process_started.set()

        if event_start:
            event_start.set()

        # wait until we are signaled
        for i in instances:
            # noinspection PyBroadException
            try:
                if i._thread and i._thread.is_alive():
                    # Do not change: we must catch BaseException in case the process gets killed
                    try:
                        i._thread.join()
                    except:  # noqa
                        break
            except:  # noqa
                pass
        # we are done, leave process
        return

    def is_alive(self):
        if self.is_subprocess():
            return self.is_subprocess_alive() and self._thread \
                   and self._start_ev.is_set() and not self._done_ev.is_set()
        else:
            return isinstance(self._thread, Thread) and self._thread.is_alive()

    @classmethod
    def is_subprocess_alive(cls, task=None):
        if not cls._main_process or (task and cls._main_process_task_id != task.id):
            return False
        # noinspection PyBroadException
        try:
            return \
                cls._main_process.is_alive() and \
                psutil.Process(cls._main_process.pid).status() != psutil.STATUS_ZOMBIE
        except Exception:
            current_pid = cls._main_process.pid
            if not current_pid:
                return False
            try:
                parent = psutil.Process(cls._parent_pid)
            except psutil.Error:
                # could not find the parent process; treat the subprocess as not alive
                return False
            for child in parent.children(recursive=True):
                # kill ourselves last (if we need to)
                if child.pid == current_pid:
                    return child.status() != psutil.STATUS_ZOMBIE
            return False

    def is_subprocess(self):
        return self._subprocess is not False and \
               bool(self._main_process) and self._task_id == self._main_process_task_id

    def _get_instances(self):
        return self._instances.setdefault(self._task_obj_id, [])

    @classmethod
    def is_subprocess_enabled(cls, task=None):
        return bool(cls._main_process) and (not task or task.id == cls._main_process_task_id)

    @classmethod
    def clear_main_process(cls, task):
        if BackgroundMonitor._main_process_task_id != task.id:
            return
        cls.wait_for_sub_process(task)
        BackgroundMonitor._main_process = None
        BackgroundMonitor._main_process_task_id = None
        BackgroundMonitor._parent_pid = None
        BackgroundMonitor._sub_process_started = None
        BackgroundMonitor._instances = {}
        SingletonThreadPool.clear()

    @classmethod
    def wait_for_sub_process(cls, task, timeout=None):
        if not cls.is_subprocess_enabled(task=task):
            return

        for d in BackgroundMonitor._instances.get(id(task.id), []):
            d.stop()

        tic = time()
        while cls.is_subprocess_alive(task=task) and (not timeout or time()-tic < timeout):
            sleep(0.03)
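
The heart of `BackgroundMonitor` is the `daemon()` loop: `Event.wait(timeout)` serves both as the sleep between work steps and as the interruptible stop signal, while a second event (`_done_ev`) lets callers block until the loop has fully drained. A minimal, self-contained sketch of that stop-event pattern (class and method names here are illustrative, not part of the code above):

import threading
import time


class StoppablePoller:
    """Runs work() every wait_period seconds until stop() is called."""

    def __init__(self, wait_period=1.0):
        self._stop_event = threading.Event()
        self._done_event = threading.Event()
        self._wait_period = wait_period
        self._thread = None

    def start(self):
        if isinstance(self._thread, threading.Thread) and self._thread.is_alive():
            return  # already running
        self._stop_event.clear()
        self._done_event.clear()
        self._thread = threading.Thread(target=self._run, daemon=True)
        self._thread.start()

    def _run(self):
        # wait() returns True only once the stop event is set, so it acts
        # as an interruptible sleep between work steps
        while not self._stop_event.wait(self._wait_period):
            self.work()
        self._done_event.set()

    def work(self):
        print("tick", time.monotonic())

    def stop(self, timeout=None):
        self._stop_event.set()
        self._done_event.wait(timeout=timeout)


if __name__ == "__main__":
    p = StoppablePoller(wait_period=0.2)
    p.start()
    time.sleep(1)
    p.stop(timeout=1)

Because `wait()` returns the moment the stop event is set, `stop()` takes effect promptly instead of sleeping out the remainder of the wait period.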
Example #57
0
def start_manager_notif(worker):
    v = Manager_notifications(viewing_time=1)
    while not thrd_stop:
        v.run_manager(worker)


thrd_stop = False  # stop flag polled by start_manager_notif
str_sms = []
worker = Read_SMS()
thrd = Thread(target=start_manager_notif, args=(worker, ))  # pass the SMS reader, which start_manager_notif expects as 'worker'
thrd.start()

sms_request = SmsRequest()

try:
    while thrd.is_alive():
        number_for_send = sms_request.get_sending_status()
        try:
            worker.read_sms()
        except UnicodeEncodeError as err:
            print(err)
            sleep(1)
            continue
        if str_sms:
            for t in str_sms:
                numbers, text = create_sms_to_send(t)
                for number in numbers:
                    worker.send_sms(number=number, message=text)
                    print(f"sms rev {worker.modem._smsRef-1}")
            str_sms.clear()
        if number_for_send:
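
Example #57's outer `while thrd.is_alive():` loop ties the main loop's lifetime to the notification thread: if the worker dies, the main loop ends too. A compact sketch of that supervision pattern, using a `threading.Event` in place of the module-level `thrd_stop` flag (the job body is a stand-in):

import threading
import time

stop_flag = threading.Event()


def background_job():
    # stand-in for the notification-manager loop
    while not stop_flag.is_set():
        time.sleep(0.1)


t = threading.Thread(target=background_job, daemon=True)
t.start()

deadline = time.monotonic() + 1.0  # demo: supervise for one second
try:
    while t.is_alive() and time.monotonic() < deadline:
        # main-loop work (reading, sending) goes here; the loop also
        # ends on its own if the worker thread dies
        time.sleep(0.2)
finally:
    stop_flag.set()
    t.join(timeout=1)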
Example #58
0
class FixedMainEngine(MainEngine):
    def __init__(self,
                 broker,
                 need_data='ht.json',
                 quotation_engines=[FixedDataEngine],
                 log_handler=DefaultLogHandler(),
                 ext_stocks=[],
                 s='sina'):
        super(FixedMainEngine, self).__init__(broker, need_data, [],
                                              log_handler)
        if type(quotation_engines) != list:
            quotation_engines = [quotation_engines]
        self.quotation_engines = []
        # cache of file modification times
        self._cache = {}
        # file -> process map
        self._process_map = {}
        # file -> module map
        self._modules = {}
        self._names = None
        # lock guarding strategy loading
        self.lock = Lock()
        # strategy-watch thread
        self._watch_thread = Thread(target=self._load_strategy)
        #positions = [p['stock_code'] for p in self.user.position]
        #print(self.user.position)
        positions = self.user.position['证券代码'].values.tolist()  # '证券代码' = security code
        positions.extend(ext_stocks)
        for quotation_engine in quotation_engines:
            self.quotation_engines.append(
                quotation_engine(self.event_engine, self.clock_engine,
                                 positions, s))

    def load(self, names, strategy_file):
        with self.lock:
            mtime = os.path.getmtime(os.path.join('strategies', strategy_file))

            # does this strategy need reloading?
            reload = False
            strategy_module_name = os.path.basename(strategy_file)[:-3]
            if self._cache.get(strategy_file, None) == mtime:
                return
            elif self._cache.get(strategy_file, None) is not None:
                # stop the existing process first
                _process = self._process_map.get(strategy_file)
                self.unbind_event(_process)
                _process.stop()
                self.log.info(u'Unloaded strategy: %s' % strategy_module_name)
                time.sleep(2)
                reload = True
            # reload, or import for the first time
            if reload:
                strategy_module = importlib.reload(
                    self._modules[strategy_file])
            else:
                strategy_module = importlib.import_module(
                    '.' + strategy_module_name, 'strategies')
            self._modules[strategy_file] = strategy_module

            strategy_class = getattr(strategy_module, 'Strategy')
            if names is None or strategy_class.name in names:
                self.strategies[strategy_module_name] = strategy_class
                # wrap the strategy in a process
                #_process = ProcessWrapper(strategy_class(self.user, log_handler=self.log, main_engine=self))
                _process = ProcessWrapper(
                    strategy_class(log_handler=self.log, main_engine=self))
                # cache the load metadata
                self._process_map[strategy_file] = _process
                self.strategy_list.append(_process)
                self._cache[strategy_file] = mtime
                self.bind_event(_process)
                self.log.info(u'Loaded strategy: %s' % strategy_module_name)

    def bind_event(self, strategy):
        """
        绑定事件
        """
        for quotation_engine in self.quotation_engines:
            self.event_engine.register(quotation_engine.EventType,
                                       strategy.on_event)
        self.event_engine.register(ClockEngine.EventType, strategy.on_clock)

    def unbind_event(self, strategy):
        """
        移除事件
        """
        for quotation_engine in self.quotation_engines:
            self.event_engine.unregister(quotation_engine.EventType,
                                         strategy.on_event)
        self.event_engine.unregister(ClockEngine.EventType, strategy.on_clock)

    def load_strategy(self, names=None):
        """动态加载策略
        :param names: 策略名列表,元素为策略的 name 属性"""
        s_folder = 'strategies'
        self._names = names
        strategies = os.listdir(s_folder)
        strategies = filter(
            lambda file: file.endswith('.py') and file != '__init__.py',
            strategies)
        importlib.import_module(s_folder)
        for strategy_file in strategies:
            self.load(self._names, strategy_file)
        # start the strategy-watch thread if it is not already running
        if not self._watch_thread.is_alive():
            self._watch_thread.start()

    def _load_strategy(self):
        while True:
            try:
                self.load_strategy(self._names)
                time.sleep(2)
            except Exception as e:
                print(e)
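
The `_watch_thread` above implements hot reloading by polling: compare each file's `os.path.getmtime` against a cached value and call `importlib.reload` when it changes. A stripped-down sketch of that mechanism for a single module (the module passed in is whatever you want to watch; error handling is omitted):

import importlib
import os
import threading
import time


class ModuleWatcher:
    """Reloads a module whenever its source file's mtime changes."""

    def __init__(self, module, poll_seconds=2):
        self._module = module
        self._mtime = os.path.getmtime(module.__file__)
        self._poll = poll_seconds
        self._thread = threading.Thread(target=self._watch, daemon=True)

    def start(self):
        # mirror the guard in load_strategy: never start the thread twice
        if not self._thread.is_alive():
            self._thread.start()

    def _watch(self):
        while True:
            mtime = os.path.getmtime(self._module.__file__)
            if mtime != self._mtime:
                self._mtime = mtime
                self._module = importlib.reload(self._module)
                print("reloaded", self._module.__name__)
            time.sleep(self._poll)

Usage would look like `ModuleWatcher(importlib.import_module('strategies.demo')).start()` for a hypothetical `strategies.demo` module; the two-second poll mirrors the `time.sleep(2)` cadence of `_load_strategy`.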
Example #59
0
    def start_download(self):
        if self.checkBox_from_file.isChecked() and self.lineEdit_path2file.text() == "" \
                or not self.checkBox_from_file.isChecked() and self.lineEdit_keywords.text() == "":
            print("Keywords is empty!")
            self.lineEdit_keywords.setFocus()
            return

        if self.lineEdit_output.text() == "":
            print("Output directory is empty!")
            self.lineEdit_output.setFocus()
            return

        self.state = "run"
        self.pushButton_start.setEnabled(False)
        self.pushButton_cancel.setEnabled(True)

        config, keywords_list = self.gen_config_from_ui()

        self.elapsed_timer.restart()
        self.update_timer.start()

        self.reset_ui()
        num_keywords = len(keywords_list)

        self.progressBar_total.setMaximum(num_keywords)
        self.progressBar_total.setFormat("%p%, %v/%m")
        self.progressBar_total.setValue(0)

        for index in range(num_keywords):
            if self.state != "run":
                break
            keywords = keywords_list[index].strip()
            if keywords == "":
                continue

            config.keywords = keywords
            str_paras = config.to_command_paras()

            print(str_paras)

            self.progressBar_current.setMaximum(config.max_number)
            self.progressBar_current.setValue(0)
            self.progressBar_current.setFormat(keywords + ", %p%, %v/%m")

            thread_download = Thread(target=image_downloader.main,
                                     args=[shlex.split(str_paras)])
            thread_download.start()

            while thread_download.is_alive():
                QTest.qWait(1000)
                if self.isHidden():
                    os._exit(0)

            self.progressBar_total.setValue(index + 1)

        if self.state == "run":
            self.state = "stop"
        self.pushButton_cancel.setEnabled(False)
        self.pushButton_start.setEnabled(True)
        self.update_timer.stop()
        print("stopped")
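
The `while thread_download.is_alive(): QTest.qWait(1000)` idiom above waits for a worker without freezing the Qt event loop. Outside Qt, the same shape is a `join` with a short timeout inside an `is_alive()` loop; a minimal sketch with a stand-in download function:

import threading
import time


def download(url):
    # stand-in for the image_downloader.main call above
    time.sleep(2)
    print("done:", url)


t = threading.Thread(target=download, args=("http://example.com",), daemon=True)
t.start()

while t.is_alive():
    print("still downloading...")  # update progress, check for cancel, etc.
    t.join(timeout=1.0)            # wake up at least once per second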
Example #60
0
class ThreadController(ServiceController):
    """Service controller that starts the service in separate thread.
    """
    def __init__(self,
                 service: ServiceDescriptor,
                 *,
                 name: str = None,
                 peer_uid: uuid.UUID = None,
                 manager: ChannelManager = None):
        """
        Arguments:
            service: Service to start.
            name:  Container name.
            peer_uid: Peer ID; `None` means a newly generated UUID (type 1) will be used.
        """
        super().__init__(service,
                         name=name,
                         peer_uid=peer_uid,
                         manager=manager)
        self.runtime: Thread = None

    def handle_stop_controller(self, exc: Exception) -> None:
        """Called when controller should stop its operation due to error condition.

        Arguments:
           exc: Exception that describes the reason why component should stop.
        """
        raise ServiceError("Internal controller error") from exc

    def is_running(self) -> bool:
        """Returns True if service is running.
        """
        if self.runtime is None:
            return False
        if self.runtime.is_alive():
            return True
        # It's dead, so dispose the runtime
        self.runtime = None
        return False

    def start(self, *, timeout: int = 10000) -> None:
        """Start the service.

        Arguments:
            timeout: Timeout (in milliseconds) to wait for the service to report it's ready.

        Raises:
            ServiceError: On error in communication with service.
            TimeoutError: When timeout expires.
        """
        if not self._ext_mngr:
            self.mngr = ChannelManager(zmq.Context.instance())
            self.mngr.log_context = weakref.proxy(self)
        iccp = ICCPController()
        iccp.on_stop_controller = self.handle_stop_controller
        chn: PairChannel = self.mngr.create_channel(PairChannel,
                                                    f'{self.name}.{SVC_CTRL}',
                                                    iccp,
                                                    wait_for=Direction.IN,
                                                    sock_opts={
                                                        'rcvhwm': 5,
                                                        'sndhwm': 5,
                                                    })
        chn.protocol.log_context = self.log_context
        self.mngr.warm_up()
        chn.bind(self.ctrl_addr)
        #
        self.runtime = Thread(target=service_thread,
                              name=self.name,
                              args=(self.service, self.config, self.ctrl_addr,
                                    self.peer_uid),
                              daemon=False)
        self.runtime.start()
        #
        try:
            result = chn.wait(timeout)
            if result == Direction.IN:
                msg: ICCPMessage = chn.receive()
                if msg is INVALID:
                    raise ServiceError("Invalid response from service")
                elif msg.msg_type is MsgType.READY:
                    self.peer = msg.peer.copy()
                    self.endpoints = msg.endpoints.copy()
                elif msg.msg_type is MsgType.ERROR:
                    raise ServiceError(msg.error)
                else:
                    raise ServiceError(
                        "ICCP protocol error - unexpected message")
            elif not self.is_running():
                raise ServiceError("Service start failed for unknown reason")
            else:
                raise TimeoutError("Service did not started in time")
        except Exception:
            if not self._ext_mngr:
                self.mngr.shutdown(forced=True)
            raise

    def stop(self, *, timeout: int = 10000) -> None:
        """Stop the service. Does nothing if service is not running.

        Arguments:
            timeout: None (infinity), or timeout (in milliseconds) for the operation.

        Raises:
            ServiceError: On error in communication with service.
            TimeoutError: When service does not stop in time.
        """
        s = monotonic()
        try:
            chn: PairChannel = self.mngr.channels[f'{self.name}.{SVC_CTRL}']
            if self.is_running():
                chn.send(
                    cast(ICCPController, chn.protocol).stop_msg(), chn.session)
                result = chn.wait(timeout)
                if result == Direction.IN:
                    msg: ICCPMessage = chn.receive()
                    if msg is INVALID:
                        self.outcome = Outcome.ERROR
                        self.details = ["Invalid response from service"]
                        raise ServiceError("Invalid response from service")
                    elif msg.msg_type is MsgType.ERROR:
                        self.outcome = Outcome.ERROR
                        self.details = [msg.error]
                        raise ServiceError(msg.error)
                    elif msg.msg_type is MsgType.FINISHED:
                        self.outcome = msg.outcome
                        self.details = msg.details
                else:
                    warnings.warn("Service shutdown not confirmed",
                                  RuntimeWarning)
                #
                if self.is_running():
                    e = monotonic()
                    if timeout is not None:
                        timeout = timeout - int((e - s) * 1000)
                        if timeout < 0:
                            timeout = 0
                    self.runtime.join(timeout)
                    if self.runtime.is_alive():
                        raise TimeoutError("The service did not stop in time")
            else:
                result = chn.wait(0)
                if result == Direction.IN:
                    msg: ICCPMessage = chn.receive()
                    if msg is INVALID:
                        self.outcome = Outcome.ERROR
                        self.details = ["Invalid response from service"]
                        raise ServiceError("Invalid response from service")
                    elif msg.msg_type is MsgType.ERROR:
                        self.outcome = Outcome.ERROR
                        self.details = [msg.error]
                        raise ServiceError(msg.error)
                    elif msg.msg_type is MsgType.FINISHED:
                        self.outcome = msg.outcome
                        self.details = msg.details
        finally:
            if not self._ext_mngr:
                self.mngr.shutdown(forced=True)

    def terminate(self) -> None:
        """Terminate the service.

        Terminate should be called ONLY when a call to stop() (with a sensible timeout) fails.
        Does nothing when service is not running.

        Raises:
            Error:  When service termination fails.
        """
        if self.is_running():
            tid = ctypes.c_long(self.runtime.ident)
            res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
                tid, ctypes.py_object(SystemExit))
            if res == 0:
                raise Error(
                    "Service termination failed due to invalid thread ID.")
            if res != 1:
                # if it returns a number greater than one, you're in trouble,
                # and you should call it again with exc=NULL to revert the effect
                ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
                raise Error(
                    "Service termination failed due to PyThreadState_SetAsyncExc failure"
                )

    def join(self, timeout=None) -> None:
        """Wait until service stops.

        Arguments:
            timeout: Floating point number specifying a timeout for the operation in
                     seconds (or fractions thereof).
        """
        if self.runtime:
            self.runtime.join(timeout)
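
`terminate()` leans on CPython's `ctypes.pythonapi.PyThreadState_SetAsyncExc`, which schedules an exception to be raised in another thread. It is strictly a last resort: the exception is delivered only when the target thread next executes Python bytecode, so a thread blocked inside a C call will not see it. A standalone sketch of the same call, mirroring the return-value checks in `terminate()` above:

import ctypes
import threading
import time


def async_raise(thread, exc_type=SystemExit):
    """Ask CPython to raise exc_type inside the given thread (CPython only)."""
    tid = ctypes.c_long(thread.ident)
    res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exc_type))
    if res == 0:
        raise ValueError("invalid thread id")
    if res > 1:
        # more than one thread state was modified: undo and bail out
        ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
        raise RuntimeError("PyThreadState_SetAsyncExc failed")


def worker():
    try:
        while True:
            time.sleep(0.1)  # the exception arrives between bytecodes
    except SystemExit:
        print("worker terminated")


t = threading.Thread(target=worker)
t.start()
time.sleep(0.5)
async_raise(t)
t.join(timeout=2)
print("alive:", t.is_alive())

As the comment in `terminate()` notes, a return value greater than one means multiple thread states were affected, and the call must be repeated with `None` (C `NULL`) to revert the effect.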