Example #1
File: id15.py Project: kif/UPBL09a
    def __init__(self):
        """
        """
        Plugin.__init__(self)
        self.queue_in = Queue()
        self.queue_out = Queue()
        self.queue_saver = None
        self.quit_event = Event()
        self.pool = []
        self.raw_saver = None

        self.ai = None  # this is the azimuthal integrator to use
        self.npt = 2000
        self.npt_azim = 256
        self.input_files = []
        self.method = "full_ocl_csr"
        self.unit = "q_nm^-1"
        self.output_file = None
        self.mask = None
        self.wavelength = None
        self.polarization_factor = None
        self.do_SA = False
        self.dummy = None
        self.delta_dummy = None
        self.norm = 1
        self.error_model = None  # "poisson"
        self.save_raw = None
        self.raw_nxs = None
        self.raw_ds = None
        self.raw_compression = None
        self.integration_method = "integrate1d"
        self.sigma_clip_thresold = 3
        self.sigma_clip_max_iter = 5
        self.medfilt1d_percentile = (10, 90)
Example #2
class Actor:
    def __init__(self):
        self._mailbox = Queue()

    def send(self, msg):
        self._mailbox.put(msg)

    def recv(self):
        msg = self._mailbox.get()
        if msg is ActorExit:
            raise ActorExit()
        return msg

    def start(self):
        self._terminated = Event()
        t = Thread(target=self._bootstrap)
        t.daemon = True
        t.start()

    def _bootstrap(self):
        try:
            self.run()
        except ActorExit:
            pass
        finally:
            self._terminated.set()

    def join(self):
        self._terminated.wait()

    def run(self):
        while True:
            msg = self.recv()
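A minimal usage sketch, assuming ActorExit is the sentinel exception class defined alongside Actor (recv() compares messages against the class itself):

from queue import Queue
from threading import Thread, Event

class ActorExit(Exception):
    # sentinel: sending the class itself asks the actor to shut down
    pass

class Printer(Actor):
    def run(self):
        while True:
            msg = self.recv()  # raises ActorExit when the sentinel arrives
            print('Got:', msg)

p = Printer()
p.start()
p.send('hello')
p.send(ActorExit)  # recv() sees the sentinel, raises, and _bootstrap sets _terminated
p.join()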
Example #3
class Uploader(threading.Thread):

    def __init__(self, backend):
        super().__init__()
        self._backend = backend
        self._upload_queue = Queue()
        self._keep_going = True
        self._retry = None

    def queue_image(self, path):
        """Queue an image for upload."""
        self._upload_queue.put(path)

    def run(self):
        while self._keep_going:
            if self._retry is None:
                path = self._upload_queue.get()
            else:
                path = self._retry
            if path is None:
                break
            with open(path, "rb") as img:
                success = self._backend.upload_image(img.read())
            if not success:
                # try again
                self._retry = path
            else:
                self._retry = None

    def stop(self):
        """Terminate the thread."""
        self._keep_going = False
        self._upload_queue.put(None)
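A brief usage sketch; DummyBackend is a hypothetical stand-in for whatever object provides upload_image(data) -> bool:

class DummyBackend:
    def upload_image(self, data):
        print("uploading", len(data), "bytes")
        return True

uploader = Uploader(DummyBackend())
uploader.start()
uploader.queue_image("/tmp/example.jpg")  # the path is an assumption
uploader.stop()   # the None sentinel unblocks get() and ends run()
uploader.join()

Note that a persistently failing upload keeps run() retrying the same path in a tight loop, so the None sentinel from stop() is never consumed in that case.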
Example #4
    def test_wings_push_switches(self):
        """Testing push switches"""
        # init wings
        settings = {"pins": {"wings": {"left_switch": 17, "right_switch": 4, "position": 25, "movement": 22}}}
        event_queue = Queue()
        logging.basicConfig()
        logger = logging.getLogger(name="TuxEatPi")
        logger.setLevel(logging.DEBUG)
        wings = FakeWings(settings, event_queue, logger)
        # Test calibrate
        self.assertEqual(wings.get_position(), "down")

        # test left switch event
        wings.push_wing('left')
        event = event_queue.get(timeout=5)
        self.assertEqual(event.component, 'FakeWings')
        self.assertEqual(event.pin_id, wings.pins.get('left_switch'))
        self.assertEqual(event.name, 'left_switch')

        # test right switch event
        wings.push_wing('right')
        event = event_queue.get(timeout=5)
        self.assertEqual(event.component, 'FakeWings')
        self.assertEqual(event.pin_id, wings.pins.get('right_switch'))
        self.assertEqual(event.name, 'right_switch')
Example #5
class PreviewDispatcherThread(QThread):
    """
    Thread used to dispatch the element to each preview worker thread.

    :param queue: The main queue containing the elements to process.
    :param code_signal: The signal to pass to the code preview worker, updates the code preview.
    :param kwargs: Remaining signals (e.g. the MO and NMM preview signals) passed through to the GUI preview worker.
    """
    def __init__(self, queue, code_signal, **kwargs):
        super().__init__()
        self.queue = queue
        self.gui_queue = Queue()
        self.code_queue = Queue()

        self.code_thread = PreviewCodeWorker(self.code_queue, code_signal)
        self.code_thread.start()
        self.gui_thread = PreviewGuiWorker(self.gui_queue, **kwargs)
        self.gui_thread.start()

    def run(self):
        while True:
            # wait for next element
            element = self.queue.get()

            if element is not None:
                element.write_attribs()
                element.load_metadata()
                element.sort()

            # dispatch to every queue
            self.gui_queue.put(element)
            self.code_queue.put(element)
Example #6
class ScraperThread(QThread):

    result_signal = pyqtSignal(dict)

    def __init__(self, parent=None):
        super(ScraperThread, self).__init__(parent)
        self._queue = Queue()
        self._stop = False

    def run(self):
        self._stop = False
        while not self._queue.empty() and not self._stop:
            processed_url = self._queue.get()
            result = requests.get(processed_url)
            self.result_signal.emit({'headers': result.headers})

    def clear_queue(self):
        self._queue = Queue()

    @property
    def queue(self):
        return self._queue

    @queue.setter
    def queue(self, urls):
        for url in urls:
            self._queue.put(url.strip())

    @property
    def stop(self):
        return self._stop

    @stop.setter
    def stop(self, stop):
        self._stop = stop
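A hypothetical wiring sketch; it assumes a running Qt application, since signals emitted from the worker thread need an event loop to reach slots in the GUI thread:

scraper = ScraperThread()
scraper.queue = ["https://example.com", "https://example.org"]  # the setter strips and enqueues each URL
scraper.result_signal.connect(lambda payload: print(payload["headers"]))
scraper.start()  # run() drains the queue, emitting the response headers for each URL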
Example #7
  def crawl(self, urls, follow_links=False):
    links, seen = set(), set()
    queue = Queue()
    converged = threading.Event()

    def execute():
      while not converged.is_set():
        try:
          url = queue.get(timeout=0.1)
        except Empty:
          continue
        if url not in seen:
          seen.add(url)
          hrefs, rel_hrefs = self.execute(url)
          links.update(hrefs)
          if follow_links:
            for href in rel_hrefs:
              if href not in seen:
                queue.put(href)
        queue.task_done()

    for url in urls:
      queue.put(url)
    for _ in range(self._threads):
      worker = threading.Thread(target=execute)
      worker.daemon = True
      worker.start()
    queue.join()
    converged.set()
    return links
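A hypothetical call site; the enclosing class, which supplies self._threads and self.execute(), is not shown here:

links = spider.crawl(["https://example.com/"], follow_links=True)
print("collected %d links" % len(links))

The 0.1 s get() timeout keeps each worker responsive to the converged event, and queue.join() followed by converged.set() lets the daemon workers wind down without joining each thread individually.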
Example #8
def bfsreduceall(sudokuObject):
    source = sudokuObject.solutiondriverNoGuess()
    if source == "Bad Response":
        return None
    elif type(source) is Sudoku:
        return source

    Q = Queue([sudokuObject])  # a project-specific Queue with enqueue/unqueue/isempty, not queue.Queue
    loop = 1
    startminnodes = None
    while not Q.isempty():
        # print("loop no",loop)
        if loop > 2: return dfsreduceall(sudokuObject)
        # if startminnodes is not None:
        #     for node in startminnodes:print(node.allowedset,node.id)
        # print("Q.unqueue()",Q)
        v = Q.unqueue()
        unfnodes = v.getOrderedMinnodesUnfilled()  # unfinished nodes
        if loop == 1: startminnodes = unfnodes
        for minnode in unfnodes:
            for permutedvalue in minnode.allowedset:
                global numsudokuobjects
                numsudokuobjects += 1
                newsudokuObject = sudokuObject.__deepcopy__()
                newsudokuObject.nodes[minnode.id].setValue(permutedvalue)
                postsolveobject = newsudokuObject.solutiondriverNoGuess()
                if type(postsolveobject) is Sudoku:
                    return postsolveobject
                elif postsolveobject != "Bad Response":
                    Q.enqueue(newsudokuObject)
                loop += 1

    return None
Example #9
def ExtractVideoInfo(courseURL):
	"""
	Extract the video information.
	"""
	queue = Queue()
	APIcaller = FlvcdAPICaller()
	parser = Open163Parser(courseURL)
	for i in range(10):
		worker = Worker(queue, parser, APIcaller)
		worker.daemon = True
		worker.start()
	parser.fillQ(queue)
	queue.join()
	videoList = parser.getResult()
	videoInfo = {
		"courseURL":courseURL,
		"videoList":videoList,
	}
	# dump complete video information.
	json.dump(videoInfo, open("videoList.json", "w"))
	print("Complete video information written to videoList.json.")
	# dump video URLs.
	urls = []
	for video in videoList:
		urls.append(video['url']+'\n')
	with open('urls.txt', 'w') as out:
		out.writelines(urls)
	print("Video URLs written to urls.txt.")
Example #10
def main():
    # These three parameters are user defined
    client_id = "*****"
    username = "******"
    password = "******"

    tokenDict = get_tokenDict(client_id, username, password)

    ts = time()
    download_dir = setup_download_dir()
    links = [l for l in get_links(client_id, tokenDict) if l.endswith(".jpg")]
    # Create a queue to communicate with the worker threads
    queue = Queue()
    # Create worker threads

    for x in range(int(argv[1])):
        worker = DownloadWorker(queue)
        # Setting daemon to True will let the main thread exit even though the workers are blocking
        worker.daemon = True
        worker.start()
    # Put the tasks into the queue as a tuple
    for link in links:
        logger.info("Queueing {}".format(link))
        queue.put((download_dir, link))
    # Causes the main thread to wait for the queue to finish processing all the tasks
    queue.join()
    print("Took {}".format(time() - ts))
Example #11
class BlockingInProcessChannel(InProcessChannel):

    def __init__(self, *args, **kwds):
        super(BlockingInProcessChannel, self).__init__(*args, **kwds)
        self._in_queue = Queue()

    def call_handlers(self, msg):
        self._in_queue.put(msg)

    def get_msg(self, block=True, timeout=None):
        """ Gets a message if there is one that is ready. """
        if timeout is None:
            # Queue.get(timeout=None) has stupid uninterruptible
            # behavior, so wait for a week instead
            timeout = 604800
        return self._in_queue.get(block, timeout)

    def get_msgs(self):
        """ Get all messages that are currently ready. """
        msgs = []
        while True:
            try:
                msgs.append(self.get_msg(block=False))
            except Empty:
                break
        return msgs

    def msg_ready(self):
        """ Is there a message that has been received? """
        return not self._in_queue.empty()
Example #12
class Metric(object):
    """
    This class stores generic time-series data in a queue.
    Values are stored as (timestamp, value) tuples
    """

    def __init__(self):
        self.metric = Queue()

    def push(self, value, timestamp=None):
        if timestamp is None:
            timestamp = int(time.time())
        elif not isinstance(timestamp, int):
            raise ValueError(
                "Timestamp should be an integer, but it is '%s'" %
                type(timestamp))
        self.metric.put((timestamp, value))

    def next(self):
        try:
            return self.metric.get_nowait()
        except Empty:
            raise StopIteration

    def get(self):
        # TODO: decide what we should return here
        return None

    def __iter__(self):
        return self

    __next__ = next  # Python 3 iterator protocol; `next` kept for Python 2 callers
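A short usage sketch; with the __next__ alias above, the metric drains like any other iterator:

m = Metric()
m.push(1.5)
m.push(2.5, timestamp=1700000000)
for ts, value in m:  # next() raises StopIteration once the queue is empty
    print(ts, value)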
Example #13
    def _port_ping(self, hosts: Queue, interface: str, results: set):
        self.logger.debug("{}: Starting TCP SYN ping thread.".format(threading.current_thread().name))

        while True:
            ip = hosts.get()  # type: IPAddress
            ip_str = str(ip)

            # Send SYN with random Src Port for each Dst port
            for dstPort in self.portstoscan:
                srcPort = random.randint(1025, 65534)
                resp = sr1(IP(dst=ip_str) / TCP(sport=srcPort, dport=dstPort, flags=ScapyTCPFlag.SYN), timeout=1,
                           verbose=False,
                           iface=interface)
                if resp and resp.haslayer(TCP):
                    if resp[TCP].flags == (TCPFlag.SYN | TCPFlag.ACK) or resp[TCP].flags == (TCPFlag.RST | TCPFlag.ACK):
                        # Send Reset packet (RST)
                        send(IP(dst=ip_str) / TCP(sport=srcPort, dport=dstPort, flags=ScapyTCPFlag.RST),
                             iface=interface, verbose=False)

                        # We know the port is closed or opened (we got a response), so we deduce that the host exists
                        node = NetworkNode()
                        node.ip = ip
                        node.mac = EUI(resp.src)
                        node.host = resolve_ip(resp[IP].src)
                        results.add(node)

                        self.logger.debug(
                            "Found a live host by pinging port {port_nbr}: {live_host}.".format(port_nbr=dstPort,
                                                                                                live_host=str(node)))

                        # We don't need to test the other ports. We know the host exists.
                        break

            hosts.task_done()
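A hypothetical driver for the thread above; scanner stands for an instance of the (unshown) enclosing class, and the interface name and address range are assumptions:

import threading
from queue import Queue
from netaddr import IPNetwork  # the same library that provides EUI above

hosts = Queue()
results = set()
for ip in IPNetwork("192.168.1.0/24"):
    hosts.put(ip)
for _ in range(8):
    t = threading.Thread(target=scanner._port_ping, args=(hosts, "eth0", results), daemon=True)
    t.start()
hosts.join()  # returns once every queued host has been task_done()'d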
Example #14
 def __init__(self, token_file, dev=False):
     """
     Not only does it represent a client connection to the discord server, but it also initializes the used api tokens
     and a representation of the League client by generating a League object.
   :param str token_file: location of the token file containing the api tokens
   :type token_file: str
   :param dev: allows the bot to start in a development environment with a separate discord bot token
   :type dev: bool
   :returns: GanjaClient -- the GanjaClient object acting as the discord client
     """
     super(GanjaClient, self).__init__()
     with open(token_file) as f:
         data = json.load(f)
         self.server_token = data['token']
         self.dev_token = data['dev_token']
         self.wolfram = data['wolfram_token']
         open_token = data['open_league_token']
         riot_token = data['league_token']
     self.database = '.databases/'
     self.http_header = {'User-Agent': 'Mozilla/5.0', 'Accept': 'text/html,application/json'}
     self.list_commands = {}
     self.voice = None
     self.player = None
     self.last_channel = None
     self.queue = Queue()
     self.queue_name = Queue()
     self.league = League(open_token, riot_token, self.http_header)
     for i in os.listdir('data'):
         with open('data/' + i) as f:
             lines = f.read().splitlines()
             self.list_commands[i] = lines
     if dev:
         self.token = self.dev_token
     else:
         self.token = self.server_token
Example #15
    def track(self):
        queue = Queue()
        thread = Thread(target=self._update_status, args=(queue,))
        thread.start()

        widgets = ['Processing...', AnimatedMarker()]
        progress_indicator = ProgressBar(widgets=widgets, maxval=UnknownLength)
        progress_indicator.start()

        content = {}
        for indicator_count in itertools.count():
            if not queue.empty():
                content = queue.get()
                if isinstance(content, Exception):
                    raise content
                widgets[0] = self._get_message(content)
            progress_indicator.update(indicator_count)
            if content.get('processed'):
                break
            sleep(0.1)
        progress_indicator.finish()

        self.__content = content

        return content
Example #16
def spin_server(queue: Queue, transport: AbstractTransport):
    while True:
        # grab the next frame from the transport and enqueue it without blocking
        b64image = transport.handle_client()
        queue.put(b64image, False)
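A minimal consumer sketch for this producer; transport stands for some concrete AbstractTransport implementation:

from queue import Queue
from threading import Thread

frames: Queue = Queue()
Thread(target=spin_server, args=(frames, transport), daemon=True).start()
b64image = frames.get()  # blocks until the producer puts the next frame

Since Queue() is created unbounded here, the non-blocking put(b64image, False) above can never actually raise Full.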
Example #17
def main():
    ts = time.time()
    # create a queue to communicate with the worker threads
    queue = Queue()
    # create 6 worker threads
    for x in range(6):
        worker = doExpbatWorker(queue)
        # setting daemon to True will let the main thread exit even though the workers are blocking
        worker.daemon = True
        worker.start()
    #for i in range(9):
    #    queue.put(('~/'+str(i)+'.bat','dfdf'))
    
    jb = []
    batpath='g:/migration/exp_script/'
    csvpath='g:/migration/mig_xw/'
    jb.append((batpath+'cps_xw_studentcourse.bat', batpath+'cps_xw_studentcourse.bat '+' AcademicAdministration '+ csvpath+'cps_xw_studentcourse.csv 202.205.160.199 jwc wangbin'))
    jb.append((batpath+'cps_xw_avgscore.bat', batpath+'cps_xw_avgscore.bat '+' AcademicAdministration '+ csvpath+'cps_xw_avgscore.csv 202.205.160.199 jwc wangbin'))
    #jb.append((batpath+'exmm_composescore330.bat', batpath+'exmm_composescore330.bat '+' zhejiang '+ csvpath+'exmm_composescore330.csv 202.205.160.183 sa !!!WKSdatatest!!!'))
    #jb.append((batpath+'cps_xw_avgscore.bat', batpath+'cps_xw_avgscore.bat '+' AcademicAdministration '+ csvpath+'cps_xw_avgscore.csv 202.205.160.199 jwc wangbin'))
    #jb.append((batpath+'exmm_xkStandardplan330.bat', batpath+'exmm_xkStandardplan330.bat '+' zhejiang '+ csvpath+'exmm_xkstandsartplan330.csv 202.205.160.183 sa !!!WKSdatatest!!!'))
    #jb.append((batpath+'exmm_xkStandard330.bat', batpath+'exmm_xkStandard330.bat '+' zhejiang '+ csvpath+'exmm_xkstandsart330.csv 202.205.160.183 sa !!!WKSdatatest!!!'))
    excl = []
    for item in jb:
        find = False
        for i in excl:
            if i in item[0]:
                find = True
                break
        if not find:
            #if 'exemptapply' in item[0]:
            queue.put(item)
    queue.join()
    print('took %s minutes ' % ((time.time() - ts) / 60,))
Example #18
    def download_cover(self, log, result_queue, abort,  # {{{
            title=None, authors=None, identifiers={}, timeout=30, get_best_cover=False):
        cached_url = self.get_cached_cover_url(identifiers)
        if cached_url is None:
            log.info('No cached cover found, running identify')
            rq = Queue()
            self.identify(log, rq, abort, title=title, authors=authors,
                    identifiers=identifiers)
            if abort.is_set():
                return
            results = []
            while True:
                try:
                    results.append(rq.get_nowait())
                except Empty:
                    break
            results.sort(key=self.identify_results_keygen(
                title=title, authors=authors, identifiers=identifiers))
            for mi in results:
                cached_url = self.get_cached_cover_url(mi.identifiers)
                if cached_url is not None:
                    break
        if cached_url is None:
            log.info('No cover found')
            return

        if abort.is_set():
            return
        br = self.browser
        log('Downloading cover from:', cached_url)
        try:
            cdata = br.open_novisit(cached_url, timeout=timeout).read()
            result_queue.put((self, cdata))
        except:
            log.exception('Failed to download cover from:', cached_url)
Example #19
class BlockingShellSocketChannel(ShellSocketChannel):

    def __init__(self, context, session, address=None):
        super(BlockingShellSocketChannel, self).__init__(context, session,
                                                        address)
        self._in_queue = Queue()

    def call_handlers(self, msg):
        #io.rprint('[[Shell]]', msg) # dbg
        self._in_queue.put(msg)

    def msg_ready(self):
        """Is there a message that has been received?"""
        return self._in_queue.qsize() > 0

    def get_msg(self, block=True, timeout=None):
        """Get a message if there is one that is ready."""
        return self._in_queue.get(block, timeout)

    def get_msgs(self):
        """Get all messages that are currently ready."""
        msgs = []
        while True:
            try:
                msgs.append(self.get_msg(block=False))
            except Empty:
                break
        return msgs
Example #20
class EventListener(FileSystemEventHandler):
    """
    Listens for changes to files and re-runs tests after each change.
    """
    def __init__(self, extensions=[]):
        super(EventListener, self).__init__()
        self.event_queue = Queue()
        self.extensions = extensions or DEFAULT_EXTENSIONS

    def on_any_event(self, event):
        """
        Called when a file event occurs.
        Note that this gets called on a worker thread.
        """
        # Filter for allowed event types
        if not isinstance(event, WATCHED_EVENTS):
            return

        src_path = os.path.relpath(event.src_path)
        dest_path = None
        if isinstance(event, FileMovedEvent):
            dest_path = os.path.relpath(event.dest_path)

        # Filter files that don't match the allowed extensions
        if not event.is_directory and self.extensions != ALL_EXTENSIONS:
            src_ext = os.path.splitext(src_path)[1].lower()
            src_included = src_ext in self.extensions
            dest_included = False
            if dest_path:
                dest_ext = os.path.splitext(dest_path)[1].lower()
                dest_included = dest_ext in self.extensions
            if not src_included and not dest_included:
                return

        self.event_queue.put((type(event), src_path, dest_path))
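A minimal wiring sketch using watchdog's Observer (an assumption consistent with the FileSystemEventHandler base class above; DEFAULT_EXTENSIONS and WATCHED_EVENTS are defined elsewhere in the project):

from watchdog.observers import Observer

listener = EventListener(extensions=['.py'])
observer = Observer()
observer.schedule(listener, path='.', recursive=True)
observer.start()

event_type, src_path, dest_path = listener.event_queue.get()  # blocks until a matching event
print(event_type.__name__, src_path, dest_path)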
Example #21
def runexternal_out_and_err(cmd, check_memleak = True):
    command = shlex.split(cmd)
    p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    if p.stdout is not None:
        q_stdout = Queue()
        t_stdout = Thread(target=read_in_thread, args=(p.stdout, q_stdout))
        t_stdout.start()
    else:
        q_stdout = None
        ret_stdout = ''

    if p.stderr is not None:
        q_stderr = Queue()
        t_stderr = Thread(target=read_in_thread, args=(p.stderr, q_stderr))
        t_stderr.start()
    else:
        q_stderr = None
        ret_stderr = ''
        
    if q_stdout is not None:
        ret_stdout = q_stdout.get().decode('ascii')
    if q_stderr is not None:
        ret_stderr = q_stderr.get().decode('ascii')

    p.wait()

    return (ret_stdout, ret_stderr)
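The snippet references a read_in_thread helper defined elsewhere; a plausible definition (an assumption, not the project's actual code) reads the whole stream and hands the bytes to the queue:

def read_in_thread(pipe, q):
    q.put(pipe.read())  # blocks until the subprocess closes the stream
    pipe.close()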
Example #22
class HandlerThread(Thread):
    def __init__(self, bot, lock):
        self.bot = bot
        self.queue = Queue()
        self.lock = lock
        super().__init__()

    def run(self):
        while True:
            try:
                items = None
                args = self.queue.get()
                with self.lock:
                    items = self.bot.__irccallbacks__[args[0]]
                
                for item in items:
                    if not get_core(item):
                        if self.bot.verbose:
                            print("[command thread:%s] calling fn %s" % (datetime.datetime.utcnow(), item.__name__))
                        item(self.bot, *(args[1]))

            except BaseException as e:
                if not isinstance(e, SystemExit) and not isinstance(e, KeyboardInterrupt):
                    traceback.print_exc()

    def push(self, cname, *args):
        self.queue.put(tuple([cname] + list(args)))
Example #23
class JQueryChaliceRequestHandler(BaseHTTPRequestHandler):

	server_version = "Extremon/0.1"

	def do_GET(self):
		self.outq=Queue(maxsize=10)
		self.running=True
		self.server.add_consumer(self)

		self.send_response(200)
		self.send_header("Content-type", "text/plain")
		self.send_header("Access-Control-Allow-Origin", "*")
		self.end_headers()
		self.missed=0
		self.running=True

		try:
			while self.running:
				try:
					message = self.outq.get() + bytes('%s.timestamp=%.2f\n%s.missed=%d\n\n' % (self.server.prefix,time.time(),self.server.prefix,self.missed),'UTF-8')
					self.wfile.write(bytes(str(len(message)) + ";", 'UTF-8'))
					self.wfile.write(message)
					self.wfile.write(b';')
					self.outq.task_done()
				except error:
					self.running=False
		finally:
			self.server.remove_consumer(self)

	def write(self,data):
		try:
			self.outq.put(data,block=False)
		except Full:
			self.missed+=1
Example #24
    def test_handle_failing_upload_xlog(self):
        sleeps = []

        def sleep(sleep_amount):
            sleeps.append(sleep_amount)
            time.sleep(0.001)

        callback_queue = Queue()
        storage = MockStorageRaising()
        self.transfer_agent.sleep = sleep
        self.transfer_agent.get_object_storage = storage
        assert os.path.exists(self.foo_path) is True
        self.transfer_queue.put({
            "callback_queue": callback_queue,
            "file_size": 3,
            "filetype": "xlog",
            "local_path": self.foo_path,
            "metadata": {"start-wal-segment": "00000001000000000000000C"},
            "site": self.test_site,
            "type": "UPLOAD",
        })
        with pytest.raises(Empty):
            callback_queue.get(timeout=0.1)
        alert_file_path = os.path.join(self.config["alert_file_dir"], "upload_retries_warning")
        assert os.path.exists(alert_file_path) is True
        os.unlink(alert_file_path)
        expected_sleeps = [0.5, 1, 2, 4, 8, 16, 20, 20]
        assert sleeps[:8] == expected_sleeps
Example #25
File: lib.py Project: humw/ToolBox
def is_alive(ip_addr):
    lock = threading.Lock()
    probe_ports = [22, 3389]
    q = Queue()
    status = False
    for port in probe_ports:
        q.put(port)

    class Probe(threading.Thread):

        def __init__(self):
            threading.Thread.__init__(self)

        def run(self):
            try:
                self.port = q.get(block=False)
            except Empty:
                return  # nothing left to probe; Thread.run()'s return value is ignored
            if tcp_probe(ip_addr, self.port):
                with lock:
                    nonlocal status
                    status = True
                # print("Success to connect to " + ip_addr + " " + str(self.port))
            # else:
                # print("Failed to connect to " + ip_addr + " " + str(self.port))
            q.task_done()

    for x in range(5):
        p = Probe()
        p.daemon = True
        p.start()

    q.join()
    return status
Example #26
def runexternal_out_and_err(cmd, check_memleak=True):
    # pylint: disable=unused-argument
    command = shlex.split(cmd)
    p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    if p.stdout is not None:
        q_stdout = Queue()
        t_stdout = Thread(target=read_in_thread, args=(p.stdout, q_stdout))
        t_stdout.start()
    else:
        q_stdout = None
        ret_stdout = ''

    if p.stderr is not None:
        q_stderr = Queue()
        t_stderr = Thread(target=read_in_thread, args=(p.stderr, q_stderr))
        t_stderr.start()
    else:
        q_stderr = None
        ret_stderr = ''

    if q_stdout is not None:
        ret_stdout = q_stdout.get().decode('ascii')
    if q_stderr is not None:
        ret_stderr = q_stderr.get().decode('ascii')

    waitcode = p.wait()
    if waitcode != 0:
        ret_stderr = ret_stderr + '\nERROR ret code = %d' % waitcode

    return (ret_stdout, ret_stderr)
Example #27
    def test_producer_consumer_with_queues(self):
        # we currently just stress yappi, no functionality test is done here.
        yappi.start()
        import time
        if utils.is_py3x():
            from queue import Queue
        else:
            from Queue import Queue
        from threading import Thread
        WORKER_THREAD_COUNT = 50
        WORK_ITEM_COUNT = 2000
        def worker():
            while True:
                item = q.get()                
                # do the work with item
                q.task_done()

        q = Queue()
        for i in range(WORKER_THREAD_COUNT):
            t = Thread(target=worker)
            t.daemon = True
            t.start()
             
        for item in range(WORK_ITEM_COUNT):
            q.put(item)
        q.join()  # block until all tasks are done
        #yappi.get_func_stats().sort("callcount").print_all()
        yappi.stop()
Example #28
 def wrapper(*args, **kargs):
     q = Queue()
     def callback(value):
         q.put(None)
     def errback(failure):
         # Retrieve and save full exception info
         try:
             failure.raiseException()
         except:
             q.put(sys.exc_info())
     def g():
         try:
             d = func(*args, **kargs)
             try:
                 d.addCallbacks(callback, errback)
             # Check for a common mistake and display a nice error
             # message
             except AttributeError:
                 raise TypeError("you must return a twisted Deferred "
                                 "from your test case!")
         # Catch exceptions raised in the test body (from the
         # Twisted thread)
         except:
             q.put(sys.exc_info())
     reactor.callFromThread(g)
     try:
         error = q.get(timeout=timeout)
     except Empty:
         raise TimeExpired("timeout expired before end of test (%f s.)"
                           % timeout)
     # Re-raise all exceptions
     if error is not None:
         exc_type, exc_value, tb = error
         raise exc_type(exc_value).with_traceback(tb)
Example #29
 def __init__(self):
     self.active_calls = []
     self.waiting_calls = Queue()
     self.respondents = []
     self.free_respondents = Queue()
     self.managers = []
     self.directors = []
Example #30
    def _put(self, item):
        # Only consider re-evaluation if we are still on the same eval
        # session.
        (eval_sess, is_reeval) = item
        if is_reeval and self._curr_eval_sess is not eval_sess:
            return

        replace = True
        if hasattr(eval_sess, "ctlr") and eval_sess.ctlr and eval_sess.ctlr.keep_existing:
            # Allow multiple eval sessions; currently used for variable
            # highlighting (bug 80095), may pick up additional uses.  Note that
            # these sessions can still get wiped out by a single replace=False
            # caller.
            replace = False

        if replace:
            # We only allow *one* eval session at a time.
            # - Drop a possible accumulated eval session.
            if len(self.queue):
                self.queue.clear()
            ## - Abort the current eval session.
            if not is_reeval and self._curr_eval_sess is not None:
                self._curr_eval_sess.ctlr.abort()

        # Lazily start the eval thread.
        if not self.is_alive():
            self.start()

        Queue._put(self, (eval_sess, is_reeval))
        if replace:
            assert len(self.queue) == 1
Example #31
File: main.py Project: eetze/LPOJ
# coding=utf-8

import MySQLdb
from queue import Queue
import socket
import json
from time import sleep
import threading
import os

mutex = threading.Lock()  # queue mutex

queue = Queue()  # global judging queue
myjsonfile = open("./setting.json", 'r')
judgerjson = json.loads(myjsonfile.read())

if os.environ.get("DB_USER"):
    judgerjson["db_ip"] = os.environ.get("DB_HOST")
    judgerjson["db_pass"] = os.environ.get("DB_PASSWORD")
    judgerjson["db_user"] = os.environ.get("DB_USER")
    judgerjson["db_port"] = os.environ.get("DB_PORT")

try:
    db = MySQLdb.connect(judgerjson["db_ip"],
                         judgerjson["db_user"],
                         judgerjson["db_pass"],
                         judgerjson["db_database"],
                         int(judgerjson["db_port"]),
                         charset='utf8')
except Exception as e:
    print(e)
Example #32
# coding:utf-8
# Check whether ZooKeeper is exposed without authentication
# author:ske

import threading
from queue import Queue
from kazoo.client import KazooClient
import sys

event = threading.Event()
event.set()
q = Queue(-1)


class multi_thread(threading.Thread):
    def __init__(self, num, q):
        threading.Thread.__init__(self)
        self.num = num
        self.q = q

    def run(self):
        while event.is_set():  # the flag was set above, so this loop runs until it is cleared
            if self.q.empty():  # queue drained: clear the flag so every thread stops
                event.clear()
            else:  # queue not empty: take the next IP and check it
                ip = self.q.get()
                self.check_zookeeper(ip)

    def check_zookeeper(self, ip):
        try:
            zk = KazooClient(hosts='{}:2181'.format(ip))
Example #33
                    'content': content,
                    'vote': vote,
                    'comments': comments
                }

                with self.lock:
                    self.f.write(
                        json.dumps(result, ensure_ascii=False).encode("utf-8")
                        + "\n")
        except Exception as e:
            print("parse data ", e)
        with self.lock:
            total += 1


data_queue = Queue()
exitFlag_Parser = False
lock = threading.Lock()
total = 0


def main():
    output = open("qiushibaike.json", "a")
    pageQueue = Queue(50)
    for page in range(1, 11):
        pageQueue.put(page)

    # initialize the crawler threads
    crawlthreads = []
    crawlList = ["crawl-1", "crawl-2", "crawl-3"]
Example #34
def BFS(step, x, y, h):  # signature reconstructed from the call BFS(*BFS_Queue.get()) below
    global run, ans
    if (x, y) in visited:
        return
    visited.add((x, y))
    if not (-1 < x < M and -1 < y < N) or abs(field[x][y]) - h > 5 or not run:
        return
    if x == (M-1) and y == (N-1):
        run = False
        ans = step
        return
    step += 1
    h = field[x][y]
    BFS_Queue.put((step, x+1, y, h))
    BFS_Queue.put((step, x, y+1, h))
    BFS_Queue.put((step, x-1, y, h))
    BFS_Queue.put((step, x, y-1, h))


a = int(input())
for _ in range(a):
    visited = set()
    BFS_Queue = Queue()
    run = True
    ans = 0
    M, N = [int(x) for x in input().split()]
    field = [[int(x) for x in input().split()] for n in range(M)]
    BFS_Queue.put((0, 0, 0, field[0][0]))
    while run and not BFS_Queue.empty():
        BFS(*BFS_Queue.get())
    print(ans)
Example #35
 def setq(consumer):  # fragment of Parallel.free_task_pool (cf. Example #36); 'self' is the enclosing Parallel instance
     consumer.ready = Queue(0)
     self.out.put(self)
Example #36
class Parallel(object):
    """
	Schedule the tasks obtained from the build context for execution.
	"""
    def __init__(self, bld, j=2):
        """
		The initialization requires a build context reference
		for computing the total number of jobs.
		"""

        self.numjobs = j
        """
		Number of consumers in the pool
		"""

        self.bld = bld
        """
		Instance of :py:class:`waflib.Build.BuildContext`
		"""

        self.outstanding = []
        """List of :py:class:`waflib.Task.TaskBase` that may be ready to be executed"""

        self.frozen = []
        """List of :py:class:`waflib.Task.TaskBase` that cannot be executed immediately"""

        self.out = Queue(0)
        """List of :py:class:`waflib.Task.TaskBase` returned by the task consumers"""

        self.count = 0
        """Amount of tasks that may be processed by :py:class:`waflib.Runner.TaskConsumer`"""

        self.processed = 1
        """Amount of tasks processed"""

        self.stop = False
        """Error flag to stop the build"""

        self.error = []
        """Tasks that could not be executed"""

        self.biter = None
        """Task iterator which must give groups of parallelizable tasks when calling ``next()``"""

        self.dirty = False
        """Flag to indicate that tasks have been executed, and that the build cache must be saved (call :py:meth:`waflib.Build.BuildContext.store`)"""

    def get_next_task(self):
        """
		Obtain the next task to execute.

		:rtype: :py:class:`waflib.Task.TaskBase`
		"""
        if not self.outstanding:
            return None
        return self.outstanding.pop(0)

    def add_outstanding_tasks(self, new_tasks, update_count=True):
        # apply task filter if it exists
        if self.bld.options.task_filter:
            filters = self.bld.options.task_filter.split(',')
            filtered_tasks = []
            for task in new_tasks:
                if task.__class__.__name__ in filters:
                    filtered_tasks.append(task)
            new_tasks = filtered_tasks
        self.outstanding += new_tasks
        if update_count:
            self.total += len(new_tasks)

    def postpone(self, tsk):
        """
		A task cannot be executed at this point, put it in the list :py:attr:`waflib.Runner.Parallel.frozen`.

		:param tsk: task
		:type tsk: :py:class:`waflib.Task.TaskBase`
		"""
        self.frozen.append(tsk)

    def refill_task_list(self):
        """
		Put the next group of tasks to execute in :py:attr:`waflib.Runner.Parallel.outstanding`.
		"""
        while self.count > self.numjobs * GAP:
            self.get_out()

        while not self.outstanding:
            if self.count:
                self.get_out()
            elif self.frozen:
                try:
                    cond = self.deadlock == self.processed
                except AttributeError:
                    pass
                else:
                    if cond:
                        msg = 'check the build order for the tasks'
                        for tsk in self.frozen:
                            if not tsk.run_after:
                                msg = 'check the methods runnable_status'
                                break
                        lst = []
                        for tsk in self.frozen:
                            lst.append(
                                '%s\t-> %r' %
                                (repr(tsk), [id(x) for x in tsk.run_after]))
                        raise Errors.WafError('Deadlock detected: %s%s' %
                                              (msg, ''.join(lst)))
                self.deadlock = self.processed

            if self.frozen:
                self.add_outstanding_tasks(self.frozen, False)
                self.frozen = []
            elif not self.count:
                self.add_outstanding_tasks(next(self.biter))
                break

    def add_more_tasks(self, tsk):
        """
		Tasks may be added dynamically during the build by binding them to the task :py:attr:`waflib.Task.TaskBase.more_tasks`

		:param tsk: task
		:type tsk: :py:attr:`waflib.Task.TaskBase`
		"""
        if getattr(tsk, 'more_tasks', None):
            self.add_outstanding_tasks(tsk.more_tasks)

    def get_out(self):
        """
		Obtain one task returned from the task consumers, and update the task count. Add more tasks if necessary through
		:py:attr:`waflib.Runner.Parallel.add_more_tasks`.

		:rtype: :py:attr:`waflib.Task.TaskBase`
		"""
        tsk = self.out.get()
        if not self.stop:
            self.add_more_tasks(tsk)
        self.count -= 1
        self.dirty = True
        return tsk

    def error_handler(self, tsk):
        """
		Called when a task cannot be executed. The flag :py:attr:`waflib.Runner.Parallel.stop` is set, unless
		the build is executed with::

			$ waf build -k

		:param tsk: task
		:type tsk: :py:attr:`waflib.Task.TaskBase`
		"""
        if not self.bld.keep:
            self.stop = True
        self.error.append(tsk)

    def add_task(self, tsk):
        """
		Pass a task to a consumer.

		:param tsk: task
		:type tsk: :py:attr:`waflib.Task.TaskBase`
		"""
        try:
            self.pool
        except AttributeError:
            self.init_task_pool()
        self.ready.put(tsk)

    def init_task_pool(self):
        # lazy creation, and set a common pool for all task consumers
        pool = self.pool = [get_pool() for i in range(self.numjobs)]
        self.ready = Queue(0)

        def setq(consumer):
            consumer.ready = self.ready

        for x in pool:
            x.ready.put(setq)
        return pool

    def free_task_pool(self):
        # return the consumers, setting a different queue for each of them
        def setq(consumer):
            consumer.ready = Queue(0)
            self.out.put(self)

        try:
            pool = self.pool
        except AttributeError:
            pass
        else:
            for x in pool:
                self.ready.put(setq)
            for x in pool:
                self.get_out()
            for x in pool:
                put_pool(x)
            self.pool = []

    def start(self):
        """
		Give tasks to :py:class:`waflib.Runner.TaskConsumer` instances until the build finishes or the ``stop`` flag is set.
		If only one job is used, then execute the tasks one by one, without consumers.
		"""

        file_filter_list = []
        if self.bld.options.file_filter != "":
            file_filter_list = self.bld.options.file_filter.split(";")

        self.total = self.bld.total()
        if self.total == 0:
            self.stop = True

        while not self.stop:

            self.refill_task_list()

            # consider the next task
            tsk = self.get_next_task()
            if not tsk:
                if self.count:
                    # tasks may add new ones after they are run
                    continue
                else:
                    # no tasks to run, no tasks running, time to exit
                    break

            if tsk.hasrun:
                # if the task is marked as "run", just skip it
                self.processed += 1
                continue

            if self.stop:  # stop immediately after a failure was detected
                break

            try:
                if not file_filter_list:
                    st = tsk.runnable_status(
                    )  # No file filter, execute all tasks
                else:
                    # File filter, check if we should compile this task
                    bExecuteTask = False
                    st = Task.SKIP_ME
                    # check if the file is used in this task.  If so, we must execute this task
                    for input in tsk.inputs:
                        if input.abspath() in file_filter_list:
                            bExecuteTask = True
                            break

                    # this task is included in the filter
                    if bExecuteTask:
                        # a task may require other tasks run first.  These may have been skipped earlier.
                        if not hasattr(tsk, 'required_tasks'):
                            tsk.required_tasks = []

                            def add_dependent_tasks(depends, tsk):
                                for t in tsk.run_after:
                                    if t.hasrun == Task.NOT_RUN or t.hasrun == Task.SKIPPED:
                                        add_dependent_tasks(depends, t)
                                        depends.append(t)

                            # can't run a task until the run_after list is completed for all tasks in the dependency chain
                            # recurse and create a list of everything that needs to be considered
                            add_dependent_tasks(tsk.required_tasks, tsk)

                        if tsk.required_tasks:
                            # process the run_after tasks first.  postpone the current task similar to ASK_LATER handling
                            self.postpone(tsk)
                            # grab a prereq and replace the task under consideration.  These tasks may have been skipped earlier
                            tsk = tsk.required_tasks.pop(0)
                            st = tsk.runnable_status()
                            # fallout, do normal task processing
                        else:
                            # prerequisites already handled, must be runnable now.  Computing the status for side effects
                            st = tsk.runnable_status()
                            assert (st != Task.ASK_LATER)
                            st = Task.RUN_ME  # but forcing the task to run anyways

                            # override the inputs for special handling
                            if len(tsk.outputs) > 0:
                                for input in tsk.inputs:
                                    if input.abspath() in file_filter_list:
                                        # patch output file to handle special commands
                                        override_output_file = self.bld.is_option_true(
                                            'show_preprocessed_file'
                                        ) or self.bld.is_option_true(
                                            'show_disassembly')
                                        if override_output_file == True:
                                            # Get file extension
                                            if self.bld.is_option_true(
                                                    'show_disassembly'):
                                                file_ext = '.diasm'
                                            elif self.bld.is_option_true(
                                                    'show_preprocessed_file'):
                                                file_ext = '.i'
                                            else:
                                                self.bld.fatal(
                                                    "Command option file extension output file implementation missing."
                                                )

                                            # Set output file
                                            out_file = input.change_ext(
                                                file_ext)
                                            tsk.outputs[0] = out_file

                                            # Add post build message to allow VS user to open the file
                                            if getattr(self.bld.options,
                                                       'execsolution', ""):
                                                self.bld.post_build_msg_warning.append(
                                                    '%s(0): warning: %s.' %
                                                    (out_file.abspath(),
                                                     "Click here to open output file"
                                                     ))
                            # fallout, resume normal task processing with the overrides
            except Exception:
                self.processed += 1
                # TODO waf 1.7 this piece of code should go in the error_handler
                tsk.err_msg = Utils.ex_stack()
                if not self.stop and self.bld.keep:
                    tsk.hasrun = Task.SKIPPED
                    if self.bld.keep == 1:
                        # if -k stop at the first exception, if -kk try to go as far as possible
                        if Logs.verbose > 1 or not self.error:
                            self.error.append(tsk)
                        self.stop = True
                    else:
                        if Logs.verbose > 1:
                            self.error.append(tsk)
                    continue
                tsk.hasrun = Task.EXCEPTION
                self.error_handler(tsk)
                continue

            if st == Task.ASK_LATER:
                self.postpone(tsk)
            elif st == Task.SKIP_ME:
                self.processed += 1
                tsk.hasrun = Task.SKIPPED
                self.add_more_tasks(tsk)
            else:
                # run me: put the task in ready queue
                tsk.position = (self.processed, self.total)
                self.count += 1
                tsk.master = self
                self.processed += 1

                if self.numjobs == 1 or self.bld.options.file_filter != '':
                    tsk.process()
                else:
                    self.add_task(tsk)

        # self.count represents the tasks that have been made available to the consumer threads
        # collect all the tasks after an error else the message may be incomplete
        while self.error and self.count:
            self.get_out()

        #print loop
        assert (self.count == 0 or self.stop)

        # free the task pool, if any
        self.free_task_pool()
Example #37
            pass

    def loop(self):
        """
		Obtain tasks from :py:attr:`waflib.Runner.TaskConsumer.ready` and call
		:py:meth:`waflib.Task.TaskBase.process`. If the object is a function, execute it.
		"""
        while 1:
            tsk = self.ready.get()
            if not isinstance(tsk, Task.TaskBase):
                tsk(self)
            else:
                tsk.process()


pool = Queue()
"""
Pool of task consumer objects
"""


def get_pool():
    """
	Obtain a task consumer from :py:attr:`waflib.Runner.pool`.
	Do not forget to put it back by using :py:func:`waflib.Runner.put_pool`
	and reset properly (original waiting queue).

	:rtype: :py:class:`waflib.Runner.TaskConsumer`
	"""
    try:
        return pool.get(False)
Example #38
def grow_cluster_st(
    D_spatial: ndarray,
    D_temporal: ndarray,
    labels: ndarray,
    set_neighbours: set,
    label_cluster: int,
    eps_spatial: float,
    eps_temporal: float,
    min_pts: int,
) -> None:
    """
    Grow a cluster starting from a seed point.

    Parameters
    ----------
    D_spatial : (N, N) ndarray
        Matrix of distances between points.
    D_temporal : (N, N) ndarray
        Matrix of distances between times.
    labels : (N,) ndarray
        Array of cluster labels.
    set_neighbours : set
        Set of indices for neighbours of the seed point.
    label_cluster : int
        Label of the current cluster.
    eps_spatial : float
        Maximum distance between two points for one to be
        considered in the neighbourhood of the other.
    eps_temporal : float
        Maximum distance between two times for one to be
        considered in the neighbourhood of the other.
    min_pts : int
        Number of points in a neighbourhood for a point to be considered
        a core point.

    Examples
    --------
    >>> points = [[0, 0], [1, 0], [2, 0], [0, 5], [1, 5], [2, 5]]

    >>> idx_pt, label = 0, 1
    >>> eps_spatial, eps_temporal, min_pts = 1, 1, 2

    >>> D_spatial = cdist(points, points)
    >>> D_temporal = np.zeros_like(D_spatial)

    >>> labels = np.zeros(len(points))

    >>> set_neighbours = region_query_st(D_spatial, D_temporal, eps_spatial, eps_temporal, idx_pt)

    >>> grow_cluster_st(D_spatial, D_temporal, labels, set_neighbours, label, eps_spatial, eps_temporal, min_pts)

    >>> labels
    array([1., 1., 1., 0., 0., 0.])

    """
    # Initialize a queue with the current neighbourhood.
    queue_search: Any = Queue()

    for i in set_neighbours:
        queue_search.put(i)

    while not queue_search.empty():

        # Consider the next point in the queue.
        idx_next = queue_search.get()

        label_next = labels[idx_next]

        if label_next == -1:
            # This neighbour was labelled as noise.
            # It is now a border point of the cluster.
            labels[idx_next] = label_cluster

        elif label_next == 0:
            # The neighbour was unclaimed.
            # Add the next point to the cluster.
            labels[idx_next] = label_cluster

            set_neighbours_next = region_query_st(
                D_spatial, D_temporal, eps_spatial, eps_temporal, idx_next
            )

            if len(set_neighbours_next) >= min_pts:
                # The next point is a core point.
                # Add its neighbourhood to the queue to be searched.
                for i in set_neighbours_next:
                    queue_search.put(i)
Example #39
class XGetter(Thread):
    def __init__(self, selection='CLIPBOARD'):
        super().__init__(name='klembord XGetter', daemon=True)
        self.selection = selection
        self._break = False
        self.inbox = Queue()
        self.initX()
        self.start()

    def initX(self):
        self.display = display.Display()

        # ATOMS
        self.SELECTION = self.display.intern_atom(self.selection)

        self.window = self.display.screen().root.create_window(
            0, 0, 1, 1, 0, X.CopyFromParent)
        self.window.set_wm_name('klembord XGetter window')

    def killX(self):
        self.window.destroy()
        self.display.close()

    def processEvent(self, xevent):
        try:
            target = self.display.get_atom_name(xevent.target)
        except BadAtom as e:
            ErrorReporter.print(e)
            return
        if target == 'TARGETS':
            try:
                target_atoms = self.window.get_full_property(
                    xevent.target, Xatom.ATOM).value
            except Exception as e:
                ErrorReporter.print(e)
                return
            data = []
            for atom in target_atoms:
                try:
                    data.append(self.display.get_atom_name(atom))
                except BadAtom as e:
                    ErrorReporter.print(e)
            data = tuple(data)
        else:
            try:
                prop = self.window.get_full_property(xevent.target,
                                                     xevent.target)
            except Exception as e:
                ErrorReporter.print(e)
                return
            if prop:
                data = prop.value
                if isinstance(data, str):
                    data = data.encode()
                else:
                    data = bytes(data)
            else:
                data = None
        self.inbox.put_nowait((target, data))

    def run(self):
        while True:
            if self._break:
                self.killX()
                break
            if self.display.pending_events():
                xevent = self.display.next_event()
                if (xevent.type == X.SelectionNotify
                        and xevent.selection == self.SELECTION
                        and xevent.requestor == self.window):
                    self.processEvent(xevent)
            time.sleep(0.005)

    def get(self, targets):
        content = {}
        try:
            self.display.flush()
        except Exception as e:
            ErrorReporter.print(e)
            raise BrokenConnection('Flushing events failed') from e
        try:
            owner = self.display.get_selection_owner(self.SELECTION)
        except BadAtom as e:
            ErrorReporter.print(e)
            raise BrokenConnection('Bad selection atom') from e
        if owner != X.NONE:
            for target in targets:
                target_atom = self.display.intern_atom(target)
                selection_request = event.SelectionRequest(
                    owner=owner,
                    requestor=self.window,
                    selection=self.SELECTION,
                    target=target_atom,
                    property=target_atom,
                    time=X.CurrentTime,
                )
                owner.send_event(selection_request, onerror=errHandler)
                try:
                    self.display.flush()
                except Exception as e:
                    ErrorReporter.print(e)
                    raise BrokenConnection('Flushing events failed') from e
                if errHandler.get_error():
                    raise BrokenConnection('Sending event failed')
            now = time.monotonic()
            while self.inbox.empty():
                if (time.monotonic() - now) >= 0.05:
                    break
                time.sleep(0.005)
            now = time.monotonic()
            while not self.inbox.empty():
                if (time.monotonic() - now) >= 0.05:
                    break
                try:
                    target, data = self.inbox.get_nowait()
                    self.inbox.task_done()
                except Empty:
                    break
                content[target] = data
        for target in targets:
            if target not in content:
                content[target] = None
        return content

    def exit(self):
        self._break = True
        # self.join()
        time.sleep(0.005)
Example #40
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.152 Safari/537.36',
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.89 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0"
]

print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))


f = open("job_update.txt", "w+", encoding="utf-8")
f.write(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n')
f.close()

fp = open("job_update.txt", "a+", encoding="utf-8")


q = cursor_query()

jobs_queue = Queue()
save_num = 0

pool = MyThreadPool()
pool.addthread(queue=q, size=18, func=get_job_detail)
pool.addthread(queue=jobs_queue, size=1, func=insertDB)
pool.startAll()
pool.joinAll()


db_urllist.close()

print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), "finished")
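
MyThreadPool is not shown in this excerpt; judging only from the calls above, a minimal stand-in could map each queue to a group of worker threads. This is a hypothetical sketch (the worker-function signature func(queue) is an assumption), not the original class:

from queue import Queue
from threading import Thread

class MyThreadPool:  # hypothetical stand-in for the elided helper
    def __init__(self):
        self._threads = []

    def addthread(self, queue, size, func):
        # 'size' workers, each draining 'queue' through 'func'
        for _ in range(size):
            self._threads.append(Thread(target=func, args=(queue,)))

    def startAll(self):
        for t in self._threads:
            t.start()

    def joinAll(self):
        for t in self._threads:
            t.join()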
Ejemplo n.º 41
0
                    for client in clients:
                        client.sendMessage(item)
                time.sleep(3)
            except Empty:  # assumes "from queue import Empty"; Queue.Empty is the py2 spelling
                pass

    def handleConnected(self):
        print("Client joined")
        clients.append(self)

    def handleClose(self):
        print("Client Left")
        clients.remove(self)


q = Queue()
thread1 = twitterThread(1, q)
thread1.start()

server = SimpleWebSocketServer('', 8888, SimpleSocket(q))
server.serveforever()

# ----------------------------------------------------------------------------------------------------------------------

# class clientThread (threading.Thread):
#   def __init__(self, threadID):
#       threading.Thread.__init__(self)
#       self.threadID = threadID
#   def run(self):
#       import asyncio
#       import websockets
from datetime import datetime
from datetime import timedelta

def keyboardInterruptHandler(signum, frame):
    logger = logging.getLogger()
    logger.info('\nKeyboard Interrupt\n')
    sys.exit(0)
signal.signal(signal.SIGINT, keyboardInterruptHandler)

# configure logging
log_format = '%(levelname)s - %(pathname)s - %(lineno)s - %(asctime)s - %(message)s'
logging.basicConfig(level=logging.DEBUG, format=log_format)
logging.captureWarnings(True)

telnet_lock = Lock()
pypoPush_q = Queue()


pypoLiq_q = Queue()
liq_queue_tracker = {
        "s0": None,
        "s1": None,
        "s2": None,
        "s3": None,
        }

#dummy_telnet_liquidsoap = DummyTelnetLiquidsoap(telnet_lock, logging)
dummy_telnet_liquidsoap = TelnetLiquidsoap(
        telnet_lock, logging, "localhost", 1234)
Ejemplo n.º 43
0
class RGBLed():
    def __init__(self, data_pin, pixel_count, state=None):
        print("RGB_INIT")
        self._pixels = neopixel.NeoPixel(data_pin, pixel_count)
        self._pixels.brightness = 0
        self._color = (0, 0, 0)
        self._brightness_queue = Queue(0)
        self._color_queue = Queue(0)
        self._transition_thread = None
        self._brightness_thread = None
        self._max_brightness = 0.7
        self._doc = None
        if state:
            self.set_state(state)

    @property
    def brightness(self):
        return self._pixels.brightness

    @brightness.setter
    def brightness(self, target_brightness=None):
        if target_brightness is None:
            return
        if self._pixels.brightness == target_brightness:
            return
        if target_brightness > self._max_brightness:
            target_brightness = self._max_brightness
        if target_brightness < 0:
            target_brightness = 0

        try:
            self._brightness_queue.put(target_brightness)
        except queue.Full as error:
            print(error)
        if not (self._brightness_thread and self._brightness_thread.is_alive()):
            self._brightness_thread = threading.Thread(
                target=self._change_brightness, daemon=True)
            self._brightness_thread.start()

        # print("Brightness target: ", target_brightness)
        # self._brightness_thread = threading.Thread(
        #     target=self._change_brightness, args=(self._pixels.brightness, target_brightness))
        # self._brightness_thread.start()

    @property
    def color(self):
        return self._color

    @color.setter
    def color(self, target_color=None):
        if target_color is None:
            return
        if not isinstance(target_color, tuple):
            target_color = self._hex_to_rgb(target_color)
        if self._color == target_color:
            return

        try:
            self._color_queue.put(target_color)
        except queue.Full as error:
            print(error)

        if not (self._transition_thread and self._transition_thread.is_alive()):
            self._transition_thread = threading.Thread(
                target=self._change_color, daemon=True)
            self._transition_thread.start()

    def _hex_to_rgb(self, hex_color):
        # '#rrggbb' -> (r, g, b); renamed to avoid shadowing the built-in hex()
        h = hex_color.lstrip('#')
        return tuple(int(h[i:i + 2], 16) for i in (0, 2, 4))

    def _change_color(self):
        while not self._color_queue.empty():
            color_from = self._color
            target_color = self._color_queue.get()
            colors = np.int_(np.linspace(color_from, target_color, 100))
            for color in colors:
                self._pixels.fill(tuple(color))
                self._color = tuple(color)
                sleep(0.005)
            self._color_queue.task_done()
        return

    def _change_brightness(self):
        while not self._brightness_queue.empty():
            target_brightness = self._brightness_queue.get()
            brightness_from = self._pixels.brightness  # scalar; the original trailing comma made this a 1-tuple
            brightness_space = np.linspace(
                brightness_from, target_brightness, 50)
            for brightness in brightness_space:
                self._pixels.brightness = brightness
                sleep(0.005)
            self._brightness_queue.task_done()
        return

    def set_state(self, state, document=None):
        # document defaults to None so the set_state(state) call in __init__ works
        print("SETTING STATE>>", state['color']['spectrumRgb'])
        if state['color']['spectrumRgb'] is not None:
            self.color = self._hex_to_rgb(state['color']['spectrumRgb'])
        if 'on' in state and state['on'] is False:
            self.brightness = 0
            return

        if state['brightness'] is not None:
            self.brightness = state['brightness'] / 100 * self._max_brightness

        if document:
            self._doc = document
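
A hedged usage sketch for the class above. board.D18 and the pixel count are assumptions; the property setters only enqueue work, and the daemon threads perform the actual fades:

import board  # CircuitPython pin definitions (assumed available)

led = RGBLed(board.D18, 12)  # data pin and LED count are hypothetical
led.color = '#ff8800'        # queued; the worker thread fades through 100 steps
led.brightness = 0.5         # clamped to _max_brightness (0.7)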
Ejemplo n.º 44
0
class XSetter(Thread):
    def __init__(self, selection='CLIPBOARD', reset=None):
        super().__init__(name='klembord XSetter', daemon=True)
        self.selection = selection
        self.reset = reset
        self._break = False
        self.save_targets = []
        self.outbox = Queue()
        self.requests = Queue()
        self.content_set = False
        self.initX()
        self.eventLoop = Thread(
            target=self.processEvents,
            name='klembord XSetter event loop',
            daemon=True,
        )
        self.start()
        self.eventLoop.start()

    def initX(self):
        self.display = display.Display()

        # ATOMS
        self.SELECTION = self.display.intern_atom(self.selection)
        self.TARGETS = self.display.intern_atom('TARGETS')
        self.SAVE_TARGETS = self.display.intern_atom('SAVE_TARGETS')
        self.CLIPBOARD_MANAGER = self.display.intern_atom('CLIPBOARD_MANAGER')
        self.ST_PROPERTY = self.display.intern_atom('KLEMBORD_SELECTION')
        self.MULTIPLE = self.display.intern_atom('MULTIPLE')

        self.window = self.display.screen().root.create_window(
            0, 0, 1, 1, 0, X.CopyFromParent)
        self.window.set_wm_name('klembord XSetter')

        self.selection_clear = event.SelectionClear(
            window=self.window,
            atom=self.SELECTION,
            time=X.CurrentTime,
        )

    def killX(self):
        self.window.destroy()
        self.display.close()

    def run(self):
        def serve():
            while True:
                xevent = self.requests.get()
                self.requests.task_done()
                if xevent is None:
                    break
                elif xevent.type == X.SelectionRequest:
                    client_prop = process_request(xevent.requestor,
                                                  xevent.property,
                                                  xevent.target)
                    selection_notify = event.SelectionNotify(
                        time=xevent.time,
                        requestor=xevent.requestor,
                        selection=xevent.selection,
                        target=xevent.target,
                        property=client_prop,
                    )
                    xevent.requestor.send_event(selection_notify,
                                                onerror=errHandler)
                    try:
                        self.display.flush()
                    except Exception as e:
                        ErrorReporter.print(e)
                        self.reset()
                        break
                elif xevent.type == X.SelectionClear:
                    while not self.requests.empty():
                        try:
                            self.requests.get_nowait()
                            self.requests.task_done()
                        except Empty:
                            break
                    self.content_set = False
                    break
                elif xevent.type == X.SelectionNotify and xevent.property == X.NONE:
                    print('Failed to transfer ownership to Clipboard Manager',
                          file=sys.stderr)
                    break

        def process_request(client, property, target):
            prop_set = True
            if property == X.NONE:
                client_prop = target
            else:
                client_prop = property
            if target == self.TARGETS:
                prop_value = [self.TARGETS, self.SAVE_TARGETS]
                prop_value += [t for t, data in content.items() if data]
                prop_type = Xatom.ATOM
                prop_format = 32
            elif target in content:
                data = content[target]
                if isinstance(data, str):
                    prop_value = data.encode()
                elif isinstance(data, ByteString):
                    prop_value = data
                else:
                    client_prop = X.NONE
                prop_type = target
                prop_format = 8
            elif target == self.MULTIPLE:
                try:
                    wanted_prop = client.get_full_property(
                        client_prop, X.AnyPropertyType)
                except Exception as e:
                    ErrorReporter.print(e)
                    self.reset()
                    return
                if wanted_prop:
                    wanted = [
                        wanted_prop.value[i:i + 2]
                        for i in range(0, len(wanted_prop.value), 2)
                    ]
                    for target, prop in wanted:
                        process_request(client, prop, target)
                    prop_set = False
                else:
                    client_prop = X.NONE
            else:
                client_prop = X.NONE
            if client_prop != X.NONE and prop_set:
                client.change_property(
                    client_prop,
                    prop_type,
                    prop_format,
                    prop_value,
                    onerror=errHandler,
                )
                try:
                    self.display.flush()
                except Exception as e:
                    ErrorReporter.print(e)
                    self.reset()
                    return
                if errHandler.get_error():
                    self.reset()
                    return
            return client_prop

        while True:
            content = self.outbox.get()
            self.outbox.task_done()
            if content is None:
                break
            self.content_set = True
            self.window.set_selection_owner(self.SELECTION,
                                            X.CurrentTime,
                                            onerror=errHandler)
            try:
                self.display.flush()
            except Exception as e:
                ErrorReporter.print(e)
                self.reset()
                break
            if errHandler.get_error():
                self.reset()
                break
            try:
                current_owner = self.display.get_selection_owner(
                    self.SELECTION)
            except (BadAtom, RuntimeError, TypeError) as e:
                ErrorReporter.print(e)
                self.reset()
                break
            if current_owner == self.window:
                server = Thread(target=serve,
                                name='klembord XSetter server',
                                daemon=True)
                server.start()

    def processEvents(self):
        while True:
            if self._break:
                self.killX()
                break
            try:
                if self.display.pending_events():
                    xevent = self.display.next_event()
                    if (xevent.type == X.SelectionRequest
                            and xevent.owner == self.window
                            and xevent.selection == self.SELECTION):
                        self.requests.put_nowait(xevent)
                    elif (xevent.type == X.SelectionClear
                          and xevent.window == self.window
                          and xevent.atom == self.SELECTION):
                        self.requests.put_nowait(xevent)
                    elif (xevent.type == X.SelectionNotify
                          and xevent.selection == self.CLIPBOARD_MANAGER
                          and xevent.target == self.SAVE_TARGETS):
                        self.requests.put_nowait(xevent)
                time.sleep(0.005)
            except Exception as e:
                ErrorReporter.print(e)
                self.reset()
                break

    def set(self, content):
        self.save_targets.clear()
        content_atoms = {}
        for target, data in content.items():
            if not isinstance(data, (str, ByteString, type(None))):
                raise TypeError('Unsupported data type:\n{}'.format(
                    repr(data)))
            with ThreadingTimeout(0.05) as timeout:
                try:
                    target_atom = self.display.intern_atom(target)
                except RuntimeError as e:
                    ErrorReporter.print(e)
                    raise BrokenConnection('Failed to intern atom') from e
            if timeout.state == timeout.TIMED_OUT:
                raise BrokenConnection('Interning atoms timed out')
            if data:
                self.save_targets.append(target_atom)
            content_atoms[target_atom] = data
        if self.content_set:
            with ThreadingTimeout(0.05) as timeout:
                self.window.send_event(self.selection_clear,
                                       onerror=errHandler)
                try:
                    self.display.flush()
                except Exception as e:
                    ErrorReporter.print(e)
                    raise BrokenConnection('Failed to flush events') from e
                if errHandler.get_error():
                    raise BrokenConnection('Failed to send events')
            if timeout.state == timeout.TIMED_OUT:
                raise BrokenConnection('Sending event timed out')
        self.outbox.put_nowait(content_atoms)

    def store(self):
        if self.content_set:
            try:
                clipboardManager = self.display.get_selection_owner(
                    self.CLIPBOARD_MANAGER)
            except BadAtom as e:
                ErrorReporter.print(e)
                raise BrokenConnection('Broken Clipboard Manager atom') from e
            if clipboardManager != X.NONE:
                self.window.change_property(
                    self.ST_PROPERTY,
                    Xatom.ATOM,
                    32,
                    self.save_targets,
                    onerror=errHandler,
                )
                try:
                    self.display.flush()
                except Exception as e:
                    ErrorReporter.print(e)
                    raise BrokenConnection('Failed to flush events') from e
                if errHandler.get_error():
                    raise BrokenConnection('Failed to change window property')
                self.window.convert_selection(
                    self.CLIPBOARD_MANAGER,
                    self.SAVE_TARGETS,
                    self.ST_PROPERTY,
                    X.CurrentTime,
                    onerror=errHandler,
                )
                try:
                    self.display.flush()
                except Exception as e:
                    ErrorReporter.print(e)
                    raise BrokenConnection('Failed to flush events') from e
                if errHandler.get_error():
                    raise BrokenConnection('Failed to convert selection')
                self.save_targets.clear()

    def clear(self):
        self.save_targets.clear()
        self.content_set = True
        self.outbox.put_nowait({})
        self.window.send_event(self.selection_clear, onerror=errHandler)
        try:
            self.display.flush()
        except Exception as e:
            ErrorReporter.print(e)
            raise BrokenConnection('Failed to flush events') from e
        if errHandler.get_error():
            raise BrokenConnection('Failed to send events')

    def exit(self):
        self._break = True
        self.outbox.put_nowait(None)
        self.requests.put_nowait(None)
        # self.join()
        # self.eventLoop.join()
        time.sleep(0.005)
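
And a hedged usage sketch of the setter above (klembord-style targets; a reachable X display is required):

setter = XSetter('CLIPBOARD')
setter.set({
    'UTF8_STRING': 'hello',
    'text/html': '<b>hello</b>',
})
setter.store()  # ask the clipboard manager to persist the content
setter.exit()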
# -*- coding: utf-8 -*-
import socket
import threading
import sys
import subprocess
import os
from datetime import datetime
import cv2
import numpy as np
import math
from queue import Queue
from time import sleep
import time
import pandas as pd

queue = Queue()  # shared between threads: data exchange between the server thread and the OpenCV thread
dis_queue = Queue()  # waiting queue between the distance-calculation thread and the OpenCV thread
s_queue = Queue()  # data shared between the distance-calculation thread and the server thread
camera_queue = Queue()  # receives camera-angle data from the socket-communication thread
# value_queue = Queue() # shares the x, y pixel values needed for distance calculation
df = pd.read_excel(
    r'C:\Users\JJungs\Documents\GitHub\Senior_Project_Konkuk\FinalProject\distance.xlsx'
)  # DataFrame of distance reference data (raw string avoids the invalid \U escape)


def mouse_callback(event, x, y, flags, param):

    if event == cv2.EVENT_RBUTTONDOWN:

        my_str = "Back"
        queue.put(my_str)
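
The callback above only fires once it is registered with an OpenCV window; a minimal wiring sketch (the window name 'frame' is an assumption):

cv2.namedWindow('frame')
cv2.setMouseCallback('frame', mouse_callback)  # a right click now enqueues "Back"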
Ejemplo n.º 46
0
class MinicapStream(object):
    __instance = None
    __mutex = threading.Lock()

    def __init__(self, ip, port):
        self.IP = ip
        self.PORT = int(port)
        self.PID = 0
        self.banner = Banner()
        self.minicap_socket = None
        self.read_image_stream_task = None

        self.push = None
        self.picture = Queue()
        self.__flag = True

    @staticmethod
    def get_builder(ip="127.0.0.1", port=1313):
        # double-checked locking around the singleton instance
        if MinicapStream.__instance is None:
            with MinicapStream.__mutex:
                if MinicapStream.__instance is None:
                    MinicapStream.__instance = MinicapStream(ip, port)
        return MinicapStream.__instance

    def get_ip(self):
        return self.IP

    def get_port(self):
        return self.PORT

    def get_queue(self):
        return self.picture

    def get_d(self):
        return self.picture.qsize()

    def start(self):
        self.minicap_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.minicap_socket.connect((self.IP, self.PORT))
        self.read_image_stream_task = threading.Thread(
            target=self.read_image_stream).start()

    def finish(self):
        self.__flag = False

    def read_image_stream(self):
        read_banner_bytes = 0
        banner_length = 2
        read_frame_bytes = 0
        frame_body_length = 0
        data_body = bytearray(b'')
        counter = 0

        while self.__flag:
            # L.info("Picture Queue : %s" % (self.get_d()))
            reallen = self.minicap_socket.recv(4096)
            length = len(reallen)
            if not length:
                break  # an empty read means the socket closed; 'continue' would busy-loop
            cursor = 0
            while cursor < length:
                if read_banner_bytes < banner_length:
                    if read_banner_bytes == 0:
                        self.banner.version = bytes_to_int(reallen[cursor])
                    elif read_banner_bytes == 1:
                        banner_length = bytes_to_int(reallen[cursor])
                        self.banner.length = banner_length
                    elif read_banner_bytes in [2, 3, 4, 5]:
                        self.banner.pid += (bytes_to_int(reallen[cursor]) <<
                                            ((read_banner_bytes - 2) * 8)) >> 0
                    elif read_banner_bytes in [6, 7, 8, 9]:
                        self.banner.real_width += (
                            bytes_to_int(reallen[cursor]) <<
                            ((read_banner_bytes - 6) * 8)) >> 0
                    elif read_banner_bytes in [10, 11, 12, 13]:
                        self.banner.real_height += (
                            bytes_to_int(reallen[cursor]) <<
                            ((read_banner_bytes - 10) * 8)) >> 0
                    elif read_banner_bytes in [14, 15, 16, 17]:
                        self.banner.virtual_width += (
                            bytes_to_int(reallen[cursor]) <<
                            ((read_banner_bytes - 14) * 8)) >> 0
                    elif read_banner_bytes in [18, 19, 20, 21]:
                        self.banner.virtual_height += (
                            bytes_to_int(reallen[cursor]) <<
                            ((read_banner_bytes - 18) * 8)) >> 0
                    elif read_banner_bytes == 22:
                        self.banner.orientation = bytes_to_int(
                            reallen[cursor]) * 90
                    elif read_banner_bytes == 23:
                        self.banner.quirks = bytes_to_int(reallen[cursor])
                    cursor += 1
                    read_banner_bytes += 1
                    if read_banner_bytes == banner_length:
                        L.debug(self.banner)
                elif read_frame_bytes < 4:
                    frame_body_length = frame_body_length + (
                        (bytes_to_int(reallen[cursor]) <<
                         (read_frame_bytes * 8)) >> 0)
                    cursor += 1
                    read_frame_bytes += 1
                else:
                    if length - cursor >= frame_body_length:
                        data_body = data_body + reallen[cursor:(
                            cursor + frame_body_length)]
                        if bytes_to_int(data_body[0]) != 0xFF or bytes_to_int(
                                data_body[1]) != 0xD8:
                            return
                        self.picture.put(data_body)
                        if self.get_d() > MAX_SIZE:
                            self.picture.get()  # drop the oldest frame
                        cursor += frame_body_length
                        frame_body_length = 0
                        read_frame_bytes = 0
                        data_body = bytearray(b'')
                        counter += 1
                    else:
                        data_body = data_body + reallen[cursor:length]
                        frame_body_length -= length - cursor
                        read_frame_bytes += length - cursor
                        cursor = length
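
A hedged usage sketch for the stream reader above, assuming a minicap service is already forwarded to the given port:

stream = MinicapStream.get_builder('127.0.0.1', 1313)
stream.start()
frame = stream.get_queue().get()  # one JPEG frame (begins with FF D8)
with open('frame.jpg', 'wb') as f:
    f.write(frame)
stream.finish()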
class MesosExecutor(BaseExecutor, LoginMixin):
    """
    MesosExecutor allows distributing the execution of task
    instances to multiple mesos workers.

    Apache Mesos is a distributed systems kernel which abstracts
    CPU, memory, storage, and other compute resources away from
    machines (physical or virtual), enabling fault-tolerant and
    elastic distributed systems to easily be built and run effectively.
    See http://mesos.apache.org/
    """
    def start(self):
        self.task_queue = Queue()
        self.result_queue = Queue()
        framework = mesos_pb2.FrameworkInfo()
        framework.user = ''

        if not configuration.get('mesos', 'MASTER'):
            self.log.error("Expecting mesos master URL for mesos executor")
            raise AirflowException(
                "mesos.master not provided for mesos executor")

        master = configuration.get('mesos', 'MASTER')

        framework.name = get_framework_name()

        if not configuration.get('mesos', 'TASK_CPU'):
            task_cpu = 1
        else:
            task_cpu = configuration.getint('mesos', 'TASK_CPU')

        if not configuration.get('mesos', 'TASK_MEMORY'):
            task_memory = 256
        else:
            task_memory = configuration.getint('mesos', 'TASK_MEMORY')

        if configuration.getboolean('mesos', 'CHECKPOINT'):
            framework.checkpoint = True

            if configuration.get('mesos', 'FAILOVER_TIMEOUT'):
                # Import here to work around a circular import error
                from airflow.models import Connection

                # Query the database to get the ID of the Mesos Framework, if available.
                conn_id = FRAMEWORK_CONNID_PREFIX + framework.name
                session = Session()
                connection = session.query(Connection).filter_by(
                    conn_id=conn_id).first()
                if connection is not None:
                    # Set the Framework ID to let the scheduler reconnect with running tasks.
                    framework.id.value = connection.extra

                framework.failover_timeout = configuration.getint(
                    'mesos', 'FAILOVER_TIMEOUT')
        else:
            framework.checkpoint = False

        self.log.info(
            'MesosFramework master : %s, name : %s, cpu : %s, mem : %s, checkpoint : %s',
            master, framework.name, str(task_cpu), str(task_memory),
            str(framework.checkpoint))

        implicit_acknowledgements = 1

        if configuration.getboolean('mesos', 'AUTHENTICATE'):
            if not configuration.get('mesos', 'DEFAULT_PRINCIPAL'):
                self.log.error(
                    "Expecting authentication principal in the environment")
                raise AirflowException(
                    "mesos.default_principal not provided in authenticated mode"
                )
            if not configuration.get('mesos', 'DEFAULT_SECRET'):
                self.log.error(
                    "Expecting authentication secret in the environment")
                raise AirflowException(
                    "mesos.default_secret not provided in authenticated mode")

            credential = mesos_pb2.Credential()
            credential.principal = configuration.get('mesos',
                                                     'DEFAULT_PRINCIPAL')
            credential.secret = configuration.get('mesos', 'DEFAULT_SECRET')

            framework.principal = credential.principal

            driver = mesos.native.MesosSchedulerDriver(
                AirflowMesosScheduler(self.task_queue, self.result_queue,
                                      task_cpu, task_memory), framework,
                master, implicit_acknowledgements, credential)
        else:
            framework.principal = 'Airflow'
            driver = mesos.native.MesosSchedulerDriver(
                AirflowMesosScheduler(self.task_queue, self.result_queue,
                                      task_cpu, task_memory), framework,
                master, implicit_acknowledgements)

        self.mesos_driver = driver
        self.mesos_driver.start()

    def execute_async(self, key, command, queue=None):
        self.task_queue.put((key, command))

    def sync(self):
        while not self.result_queue.empty():
            results = self.result_queue.get()
            self.change_state(*results)

    def end(self):
        self.task_queue.join()
        self.mesos_driver.stop()
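
start() reads everything from the [mesos] section of airflow.cfg; the sketch below lists the keys it touches (values are illustrative, not defaults) and the executor lifecycle as the methods above define it:

# airflow.cfg, hedged example values:
#   [mesos]
#   master = 10.0.0.1:5050
#   task_cpu = 1
#   task_memory = 256
#   checkpoint = False
#   authenticate = False
executor = MesosExecutor()
executor.start()
executor.execute_async(key=('my_dag', 'my_task', 1), command='airflow run ...')
executor.sync()  # drain result_queue and update task states
executor.end()   # wait for queued tasks, then stop the driver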
Ejemplo n.º 48
0
class Connection:
    """
    proxy sqlite3.connection
    """
    def __init__(
            self,
            database,
            loop=None,
            executor=None,
            timeout=5,
            echo=False,
            check_same_thread=False,
            isolation_level='',
            sqlite=sqlite3,
            **kwargs
    ):
        if check_same_thread:
            logger.warning(
                'check_same_thread is True '
                'sqlite on one Thread run'
            )
        self._sqlite = sqlite
        self._database = database
        self._loop = loop or asyncio.get_event_loop()
        self._kwargs = kwargs
        self._executor = executor
        self._echo = echo
        self._timeout = timeout
        self._isolation_level = isolation_level
        self._check_same_thread = check_same_thread
        self._conn = None
        self._closed = False
        if check_same_thread:
            self._thread_lock = asyncio.Lock(loop=loop)
            self.tx_queue = Queue()
            self.rx_queue = Queue()
            self.tx_event = Event()
            self.rx_event = Event()
            self._thread = SqliteThread(
                self.tx_queue,
                self.rx_queue,
                self.tx_event,
                self.rx_event
            )
            self._thread.start()
            self._threading = True
        else:
            self._thread = None
            self._threading = False

    def __enter__(self):
        """
        Plain (synchronous) context-manager entry.
        """
        return self

    def __exit__(self, exc_type, exc, tbs):
        """
        Plain (synchronous) context-manager exit.
        """
        self._loop.call_soon_threadsafe(self.close)

    def _log(self, level, message, *args):
        """
        Logging helper.
        """
        if self._echo:
            log_fun = getattr(logger, level)
            log_fun(message, *args)

    @asyncio.coroutine
    def _execute(self, func, *args, **kwargs):
        """
        Run a synchronous call asynchronously.
        """
        if self._closed:
            raise TypeError('connection is close')
        func = partial(func, *args, **kwargs)
        if self._check_same_thread:
            future = yield from self._async_thread_execute(func)
        else:
            future = yield from self._loop.run_in_executor(
                self._executor,
                func
            )
        return future

    @asyncio.coroutine
    def async_execute(self, func, *args, **kwargs):
        """
        Run a synchronous call asynchronously.
        """
        return (yield from self._execute(func, *args, **kwargs))

    def sync_execute(self, func, *args, **kwargs):
        """
        Execute a call synchronously.
        """
        if self._closed:
            raise TypeError('connection is close')
        func = partial(func, *args, **kwargs)
        if self._check_same_thread:
            return self._thread_execute(func)
        return func()

    @asyncio.coroutine
    def _close_thread(self):
        future = yield from self._async_thread_execute('close')
        self._threading = False
        self._thread = None
        return future

    @asyncio.coroutine
    def _async_thread_execute(self, func):
        """
        Serialize calls with an asyncio lock so only one runs at a time.
        """
        with (yield from self._thread_lock):
            func = partial(self._thread_execute, func)
            future = yield from self._loop.run_in_executor(
                self._executor,
                func
            )
        return future

    def _thread_execute(self, func):
        """
        Hand a task to the worker thread and wait for the result.
        """
        self.tx_queue.put(func)
        self.tx_event.set()
        self.rx_event.wait()
        self.rx_event.clear()
        result = self.rx_queue.get_nowait()
        if isinstance(result, Exception):
            # pragma: no cover
            raise result
        return result

    @asyncio.coroutine
    def _connect(self):
        """
        Asynchronous connect; must use the threaded mode.
        """
        func = yield from self._execute(
            self._sqlite.connect,
            self._database,
            timeout=self._timeout,
            isolation_level=self._isolation_level,
            check_same_thread=self._check_same_thread,
            **self._kwargs
        )
        self._conn = func
        self._log(
            'debug',
            'connect-> "%s" ok',
            self._database
        )

    @asyncio.coroutine
    def connect(self):
        """
        connect
        """
        return (yield from self._connect())

    @property
    def echo(self):
        """
        Logging output switch.
        """
        return self._echo

    @property
    def loop(self):
        """
        The event loop this connection uses.
        """
        return self._loop

    @property
    def timeout(self):
        """
        Timeout, in seconds.
        """
        return self._timeout

    @property
    def closed(self):
        """
        Whether the connection has been closed.
        """
        return self._closed

    @property
    def autocommit(self):
        """
        Whether the connection is in autocommit mode.
        """
        return self._conn.isolation_level is None

    @property
    def isolation_level(self):
        """
        The isolation level (None means autocommit).
        """
        return self._conn.isolation_level

    @isolation_level.setter
    def isolation_level(self, value: str) -> None:
        """
        Set the transaction isolation level.
        """
        if self._check_same_thread:
            func = partial(self._sync_setter, 'isolation_level', value)
            self._thread_execute(func)
        else:
            self._conn.isolation_level = value

    @property
    def row_factory(self):
        """
        row_factory
        """
        return self._conn.row_factory

    def _sync_setter(self, field, value):
        """
        Set an attribute synchronously.
        """
        setattr(self._conn, field, value)

    @row_factory.setter
    def row_factory(self, value):
        """
        set row_factory
        """
        if self._check_same_thread:
            func = partial(self._sync_setter, 'row_factory', value)
            self._thread_execute(func)
        else:
            self._conn.row_factory = value

    @property
    def text_factory(self):
        """
        text_factory
        """
        return self._conn.text_factory

    @text_factory.setter
    def text_factory(self, value):
        """
        set text_factory
        """
        if self._check_same_thread:
            func = partial(self._sync_setter, 'text_factory', value)
            self._thread_execute(func)
        else:
            self._conn.text_factory = value

    def _create_cursor(self, cursor):
        """
        Create a proxy cursor.
        """
        return Cursor(cursor, self, self._echo)

    def _create_context_cursor(self, coro):
        """
        Create a cursor wrapped in an awaitable context manager.
        """
        return _LazyloadContextManager(coro, self._create_cursor)

    def cursor(self):
        """
        Return a cursor wrapped for context-manager use.
        """
        coro = self._execute(self._conn.cursor)
        return self._create_context_cursor(coro)

    @asyncio.coroutine
    def close(self):
        """
        Close the connection.
        """
        if self._closed or self._conn is None:
            return
        yield from self._execute(self._conn.close)
        if self._check_same_thread:
            yield from self._close_thread()
            self._thread = None
        self._closed = True
        self._log(
            'debug',
            'close-> "%s" ok',
            self._database
        )

    def execute(
            self,
            sql,
            parameters=None,
    ):
        """
        Helper to create a cursor and execute the given query.
        """
        self._log(
            'info',
            'connection.execute->\n  sql: %s\n  args: %s',
            sql,
            str(parameters)
        )
        if parameters is None:
            parameters = []
        coro = self._execute(self._conn.execute, sql, parameters)
        return self._create_context_cursor(coro)

    @asyncio.coroutine
    def executemany(
            self,
            sql,
            parameters,
    ):
        """
        Helper to create a cursor and execute the given multiquery.
        """
        self._log(
            'info',
            'connection.executemany->\n  sql: %s\n  args: %s',
            sql,
            str(parameters)
        )
        coro = self._execute(
            self._conn.executemany,
            sql,
            parameters
        )
        return self._create_context_cursor(coro)

    def executescript(
            self,
            sql_script,
    ):
        """
        Helper to create a cursor and execute a user script.
        """
        self._log(
            'info',
            'connection.executescript->\n  sql_script: %s',
            sql_script
        )
        coro = self._execute(
            self._conn.executescript,
            sql_script
        )
        return self._create_context_cursor(coro)

    def sync_close(self):
        """
        Close the connection synchronously.
        """
        self.__del__()

    def __del__(self):
        """
        Close the connection and clean up the worker thread.
        """
        if not self._closed:
            if self._check_same_thread:
                if self._thread:
                    self._thread_execute(self._conn.close)
                    self._thread_execute('close')
                    self._thread = None
                    self._thread_lock = None
                    self.tx_queue = None
                    self.rx_queue = None
                    self.tx_event = None
                    self.rx_event = None
                else:
                    # pragma: no cover
                    pass
            else:
                self._conn.close()
            self._conn = None
            self._sqlite = None
            database = self._database
            self._database = None
            self._loop = None
            self._kwargs = None
            self._executor = None
            self._closed = True
            self._log(
                'debug',
                '__del__ close-> "%s" ok',
                database
            )
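
A hedged usage sketch for the proxy connection above, written in the same pre-async/await style the class itself uses. That _LazyloadContextManager resolves to a Cursor when awaited, and that the proxy Cursor exposes fetchone(), are assumptions:

import asyncio

@asyncio.coroutine
def main():
    conn = Connection(':memory:', check_same_thread=True)
    yield from conn.connect()
    cursor = yield from conn.execute('SELECT 1')  # assumed to resolve to a Cursor
    row = yield from cursor.fetchone()            # fetchone() on the proxy is assumed
    print(row)
    yield from conn.close()

asyncio.get_event_loop().run_until_complete(main())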
Ejemplo n.º 49
0
class RabbitMQListener:

    def __init__(
        self, rmq_config, binding_keys=("com.#.InstrumentMessage",),
        tick_dict=ALPHAMONGO_TICK_DICT, message_queue=None, watch_symbols=None, logger=None, test_mode=False
        ):

        self._host = rmq_config['host']
        self._port = int(rmq_config['port'])
        self._user = rmq_config['user']
        self._password = rmq_config['password']
        self._exchange = EXCHANGES_DICT[rmq_config['exchange']]
        self._binding_keys = binding_keys
        self._tick_dict = tick_dict
        self._test_mode = test_mode

        self.msg_queue = Queue() if message_queue is None else message_queue

        self._connection = None
        self._channel = None
        self._queue_name = None
        self._customer_tag = None
        self._schema_dict = {}

        self._message_listener_thread = None
        self._thread_lock = Lock()
        self._is_listening = False  # thread locked

        self._watch_symbols = watch_symbols

        self._logger = logger if logger is not None else get_logger('rmq_listener')

    def _connect_2_rabbit(self):
        parameters = None
        try:
            parameters = pika.ConnectionParameters(
                host=self._host, port=self._port, virtual_host='/', connection_attempts=3,
                retry_delay=30, credentials=pika.PlainCredentials(self._user, self._password))
            self._connection = pika.BlockingConnection(parameters)
            self._channel = self._connection.channel()
            self._channel.exchange_declare(exchange=self._exchange, exchange_type='topic')
            result = self._channel.queue_declare(exclusive=True)
            self._queue_name = result.method.queue
            for binding_key in self._binding_keys:
                self._channel.queue_bind(exchange=self._exchange, queue=self._queue_name, routing_key=binding_key)
            self._logger.info("RabbitMQ connected successfully! Parameters: {}".format(parameters))
        except BaseException as e:
            self._logger.error('RabbitMQ _connect_2_rabbit Error: Parameters: {}'.format(parameters))
            self._logger.error('RabbitMQ _connect_2_rabbit Error: {}'.format(e.__repr__()), exc_info=True)

    def _callback(self, ch, method, properties, body):
        schema_name = method.routing_key.rsplit('.', 1)[-1]
        if schema_name in self._schema_dict:
            schema = self._schema_dict[schema_name]
        else:
            schema = avro.schema.Parse(open('{}/{}.avro'.format(AVRO_SCHEMAS_PATH, schema_name)).read())
            self._schema_dict[schema_name] = schema
        buffer_reader = BytesIO(body)
        buffer_decoder = avro.io.BinaryDecoder(buffer_reader)
        datum_reader = avro.io.DatumReader(schema)
        msg = datum_reader.read(buffer_decoder)
        self._distribute_message(schema_name, msg)

    def _distribute_message(self, schema_name, msg):
        if schema_name == "InstrumentMessage":
            self._subscribe_instrument(msg)
        elif schema_name == "TradeMessage":
            self._subscribe_trade_message(msg)
        elif schema_name == "OrderStatusMessage":
            self._subscribe_order_status_message(msg)
        else:
            msg.clear()

    def _subscribe_instrument(self, instrument_msg):
        """
        Put to message Queue to be read by other threads.
        """
        if self._watch_symbols is not None and instrument_msg['symbol'].upper() not in self._watch_symbols:
            instrument_msg.clear()
            return
        translated_msg = {self._tick_dict[k] if k in self._tick_dict else k: v for k, v in instrument_msg.items()}
        self.msg_queue.put(translated_msg)
        if self._test_mode:
            #if instrument_msg['symbol'].upper() == 'RB1910':
            self._logger.info("\n{}".format(instrument_msg))
            self._logger.info("\n{}".format(self.msg_queue.get()))

    def _subscribe_trade_message(self, trade_msg):
        trade_msg.clear()

    def _subscribe_order_status_message(self, order_status_msg):
        order_status_msg.clear()

    def _start_consuming(self):
        while True:
            self._thread_lock.acquire()
            if not self._is_listening:
                self._thread_lock.release()
                break
            else:
                self._thread_lock.release()

            self._connect_2_rabbit()

            try:
                self._customer_tag = self._channel.basic_consume(self._callback, queue=self._queue_name, no_ack=True)
                self._channel.start_consuming()
            except BaseException as e:
                self._logger.error('RabbitMQ _start_consuming Error: {}'.format(e.__repr__()), exc_info=True)

    def _stop_consuming(self):
        try:
            self._thread_lock.acquire()
            self._is_listening = False
            self._thread_lock.release()

            # Sleep 2 second in case the output Queue interfacing other threads is deleted immediately after it is
            # just created. Some RabbitMQ object may still being constructed but not done yet, and some RabbitMQ
            # call will fail.
            time.sleep(2)

            try:
                self._channel.cancel()
                for binding_key in self._binding_keys:
                    self._channel.queue_unbind(exchange=self._exchange, queue=self._queue_name, routing_key=binding_key)
                self._channel.queue_delete(self._queue_name)
                self._channel.close()
            except AttributeError as e:
                self._logger.error('RabbitMQ _stop_consuming error: {}'.format(e.__repr__()), exc_info=True)
            try:
                self._connection.close()
            except AttributeError as e:
                self._logger.error('RabbitMQ _stop_consuming error: {}'.format(e.__repr__()), exc_info=True)
        except BaseException as e:
            self._logger.error('RabbitMQ _stop_consuming error: {}'.format(e.__repr__()), exc_info=True)

    def start_listener(self):
        self._message_listener_thread = Thread(target=self._start_consuming)
        self._thread_lock.acquire()
        self._is_listening = True
        self._thread_lock.release()
        self._message_listener_thread.start()
        self._logger.info("RabbitMQ listener thread started. Host={}:{}@{}:{}. Exchange={}. BindingKeys={}".format(
            self._user, self._password, self._host, self._port, self._exchange, self._binding_keys
        ))

    def stop_listener(self):
        if self._message_listener_thread is None or not self._message_listener_thread.is_alive():
            return
        self._stop_consuming()
        if self._message_listener_thread.is_alive():
            self._message_listener_thread.join()
        self._logger.info("RabbitMQ listener thread stopped.")
Ejemplo n.º 51
0
import logging.config
log = logging.getLogger(__name__)

@unique
class ReportStatus(IntEnum):
	# Report severity levels
	INFO = 3
	WARNING = 2
	ADMIN = 1
	UNKNOWN = 0

	@classmethod
	def has_value(cls, value):
		return value in cls._value2member_map_ 

report_queue = Queue(maxsize=0)

class Report():
	def __init__(self, level, message, from_uid, about_cid, about_uid, about_msgid, specific_message):
		if ReportStatus.has_value(level):
			self.level = level
		else:
			self.level = ReportStatus.UNKNOWN  # fall back instead of leaving level unset
		self.from_uid = from_uid
		self.about_cid = about_cid
		self.about_uid = about_uid
		self.about_msgid = about_msgid
		self.message = message
		self.specific_message = specific_message

# Push a report inside the Queue
def send_report(bot, level, message, from_uid, about_cid="", about_uid="", about_msgid="", specific_message=""):
	report = Report(level, message, from_uid, about_cid, about_uid, about_msgid, specific_message)
	report_queue.put(report)
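
On the consuming side, a worker would drain report_queue roughly like this (a hedged sketch; the actual dispatch to the bot is not shown in this excerpt):

def report_worker():
	while True:
		report = report_queue.get()
		# dispatch report.level / report.message to the bot here
		report_queue.task_done()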
Ejemplo n.º 52
0
    def create_ssl_connection(self, address, hostname, cache_key, getfast=None, forward=None, **kwargs):
        def get_cache_sock(cache=None):
            if cache is None:
                cache = self.ssl_connection_cache.get(cache_key)
            try:
                while cache:
                    ctime, ssl_sock = cache.pop()
                    if self.check_connection_alive(ssl_sock, self.keeptime, ctime):
                        return ssl_sock
            except IndexError:
                pass

        def get_cache_sock_ex():
            if cache_key is None or '|' in hostname:
                return
            names = hostname.split('.')
            if len(names[-1]) == 2 and len(names[-2]) <= 3:
                if len(names) > 3:
                    del names[0]
            elif len(names) > 2:
                del names[0]
            chost = '.'.join(names)
            ckey = '%s:%s' % (chost, cache_key.partition(':')[-1])
            keys = tuple(self.ssl_connection_cache.keys())
            for key in keys:
                if key not in self.ssl_connection_cache or\
                         '|' in key or not key.endswith(ckey):
                    continue
                cache = self.ssl_connection_cache[key]
                sock = get_cache_sock(cache)
                if sock:
                    if key != cache_key:
                        logging.warning(
                            '%s create_ssl_connection %r is trying to reuse the %r connection; '
                            'site %r may map many subdomains to only a few IPs.\n'
                            'You can add an iplist entry:\tcdn_%s = %s\n'
                            'and use it from the auto rules:\t%s$ = cdn_%s',
                            sock.xip[0], hostname, key, chost, chost, chost, chost, chost)
                    return sock

        sock = get_cache_sock()
        if sock:
            return sock

        result = None
        host, port = address
        addresses = [(x, port) for x in dns[hostname]]
        for i in range(self.max_retry):
            addresseslen = len(addresses)
            if getfast and gae_testgwsiplist:
                # take the best-ranked IPs, up to the number of threads
                addresses.sort(key=self.get_ssl_connection_time)
                addrs = addresses[:autorange_threads + 1]
            else:
                if addresseslen > self.max_window:
                    addresses.sort(key=self.get_ssl_connection_time)
                    window = min((self.max_window + 1)//2 + min(i, 1), addresseslen)
                    addrs = addresses[:window] + random.sample(addresses[window:], self.max_window-window)
                else:
                    addrs = addresses
            queobj = Queue()
            for addr in addrs:
                start_new_thread(self._create_ssl_connection, (addr, cache_key, host, queobj, forward, get_cache_sock))
            addrslen = len(addrs)
            for n in range(addrslen):
                result = queobj.get()
                if isinstance(result, Exception):
                    addr = result.xip
                    if addresseslen > 1:
                        # temporarily remove the bad IP
                        try:
                            addresses.remove(addr)
                            addresseslen -= 1
                        except ValueError:
                            pass
                    if i == n == 0:
                        if isinstance(result, LimiterFull):
                            sock = get_cache_sock_ex()
                            if sock:
                                return sock
                        else:
                            # only report the first error
                            logging.warning('%s _create_ssl_connection %r returned %r, retrying', addr[0], host, result)
                else:
                    if addrslen - n > 1:
                        cache = self.ssl_connection_cache[cache_key]
                        start_new_thread(self._cache_connection, (cache, addrslen-n-1, queobj))
                    return result
        if result:
            raise result
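
Stripped of the caching and IP-ranking logic, the core pattern above (one connect thread per candidate address, with the first usable result winning) looks like this in isolation, as a generic sketch rather than GotoX's API:

from queue import Queue
from threading import Thread
import socket

def _try_connect(addr, results):
    try:
        results.put(socket.create_connection(addr, timeout=3))
    except OSError as e:
        results.put(e)

def fastest_connection(addrs):
    # assumes a non-empty list of (host, port) tuples
    results = Queue()
    for addr in addrs:
        Thread(target=_try_connect, args=(addr, results), daemon=True).start()
    for _ in addrs:
        result = results.get()
        if not isinstance(result, Exception):
            return result  # first successful socket wins
    raise result  # every candidate failed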
Ejemplo n.º 53
0
                mv = sg.select_move()
            else:
                mv = sg.moves[int(len(sg.moves) * random.random())]
            sg.play_move(mv)
            sg.turn = 1 - sg.turn
            count += 1
        num_white, num_black = sg.final_result()
        if num_white > num_black: w += 1
        if num_black > num_white: b += 1
    return w / b


try:
    qdp = Popen("java -jar quickdraw.jar", shell=True, stdout=PIPE, stdin=PIPE)
    op = qdp.stdin
    qdq = Queue()
    qdt = Thread(target=_queueqd, args=(qdp.stdout, qdq))
    qdt.daemon = True
    qdt.start()
except Exception:
    print("quickdraw.jar must be in the same directory as this python script.")
    quit()

send("mouseclick True\n")
# initialize the board, get ready for the game

board = makeBoard()
draw(board)

# game time, let's play...
# human player is white and moves first
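
The reader-thread target _queueqd is not included in this excerpt; a plausible shape (purely an assumption) pipes the subprocess's stdout lines into the queue:

def _queueqd(pipe, q):  # hypothetical reconstruction of the elided helper
    for line in iter(pipe.readline, b''):
        q.put(line)
    pipe.close()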
Ejemplo n.º 54
0
#### CONFIGURE YOUR SETTINGS HERE ####
GH_USERNAME = '******'
GH_PASSWORD = '******'
OWNER = 'tsjoji'
REPOSITORY = 'blackhatpython'
BRANCH = 'main'
######################################

trojan_id = "abc"  # unique id for this trojan
relative_path = ""
trojan_config = relative_path + "config/{0}.json".format(trojan_id)
data_path = relative_path + "data/{0}/".format(trojan_id)
trojan_modules = []
configured = False
task_queue = Queue()

def connect_to_github():
	# NOTE: there is also an option to login via tokens (see docs for more info)
	gh = login(username=GH_USERNAME, password=GH_PASSWORD)
	repo = gh.repository(OWNER, REPOSITORY)
	branch = repo.branch(BRANCH)

	return gh, repo, branch

def get_file_contents(filepath):
	gh, repo, branch = connect_to_github()
	tree = branch.commit.commit.tree.to_tree().recurse()

	for filename in tree.tree:
		if filepath in filename.path:
Ejemplo n.º 55
0
def execute_jobs(jobs,
                 show_progress=False,
                 number_of_workers=10,
                 debug_jobs=False):
    from vcstool.streams import stdout
    if debug_jobs:
        logger.setLevel(logging.DEBUG)

    results = []

    job_queue = Queue()
    result_queue = Queue()

    # create worker threads
    workers = []
    for _ in range(min(number_of_workers, len(jobs))):
        worker = Worker(job_queue, result_queue)
        workers.append(worker)

    # fill job_queue with jobs for each worker
    pending_jobs = list(jobs)
    running_job_paths = []
    while job_queue.qsize() < len(workers):
        job = get_ready_job(pending_jobs)
        if not job:
            break
        running_job_paths.append(job['client'].path)
        logger.debug("started '%s'" % job['client'].path)
        job_queue.put(job)
    logger.debug('ongoing %s' % running_job_paths)

    # start all workers
    for w in workers:
        w.start()

    # collect results
    while len(results) < len(jobs):
        (job, result) = result_queue.get()
        logger.debug("finished '%s'" % job['client'].path)
        running_job_paths.remove(result['job']['client'].path)
        if show_progress and len(jobs) > 1:
            if result['returncode'] == NotImplemented:
                stdout.write('s')
            elif result['returncode']:
                stdout.write('E')
            else:
                stdout.write('.')
            if debug_jobs:
                stdout.write('\n')
            stdout.flush()
        result.update(job)
        results.append(result)
        if pending_jobs:
            for pending_job in pending_jobs:
                pending_job.get('depends', set()).discard(job['client'].path)
            while job_queue.qsize() < len(workers):
                job = get_ready_job(pending_jobs)
                if not job:
                    break
                running_job_paths.append(job['client'].path)
                logger.debug("started '%s'" % job['client'].path)
                job_queue.put(job)
            assert running_job_paths
        if running_job_paths:
            logger.debug('ongoing ' + str(running_job_paths))
    if show_progress and len(jobs) > 1 and not debug_jobs:
        print('', file=stdout)  # finish progress line

    # join all workers
    for w in workers:
        w.done = True
    for w in workers:
        w.join()
    return results
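
Worker itself is not shown; a minimal sketch consistent with how execute_jobs drives it (run_job is a hypothetical helper standing in for the actual command execution; vcstool's real class differs in detail):

from queue import Empty
from threading import Thread

class Worker(Thread):
    # sketch only: drain job_queue, report (job, result) pairs on result_queue
    def __init__(self, job_queue, result_queue):
        super().__init__()
        self.daemon = True
        self.job_queue = job_queue
        self.result_queue = result_queue
        self.done = False

    def run(self):
        while not self.done:
            try:
                # the timeout lets the thread notice done=True instead of
                # blocking forever once all jobs have been handed out
                job = self.job_queue.get(timeout=0.1)
            except Empty:
                continue
            result = run_job(job)  # hypothetical: execute the vcs command
            result['job'] = job    # execute_jobs reads result['job']
            self.result_queue.put((job, result))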
Example No. 56
class Pixels:
    NUM_LEDS = 12
    PIN_MOSI = 10
    PIN_SCLK = 11
    PIN_SEL = 7  # CE1
    brightness = 5

    def __init__(self, pattern=AlexaLedPattern):
        self.pattern = pattern(show=self.show)

        self.dev = APA102(num_led=self.NUM_LEDS)

        self.power = LED(5)
        self.power.on()

        self.queue = Queue()
        self.thread = Thread(target=self._run)
        self.thread.daemon = True
        self.thread.start()

        self.last_direction = None

    def wakeup(self, direction=0):
        self.last_direction = direction

        def f():
            self.pattern.wakeup(direction)

        self.put(f)

    def listen(self):
        if self.last_direction:

            def f():
                self.pattern.wakeup(self.last_direction)

            self.put(f)
        else:
            self.put(self.pattern.listen)

    def think(self):
        self.put(self.pattern.think)

    def speak(self):
        self.put(self.pattern.speak)

    def off(self):
        self.put(self.pattern.off)

    def put(self, func):
        self.pattern.stop = True
        self.queue.put(func)

    def _run(self):
        while True:
            func = self.queue.get()
            self.pattern.stop = False
            func()

    def show(self, data):
        for i in range(self.NUM_LEDS):
            self.dev.set_pixel(i, int(data[4 * i + 1]), int(data[4 * i + 2]),
                               int(data[4 * i + 3]))

        self.dev.show()
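
The key design choice is in put(): it raises the running pattern's stop flag before queueing, so a long animation returns early and _run immediately dispatches the next function. Hypothetical usage, assuming the APA102/gpiozero hardware stack above is present:

pixels = Pixels()
pixels.wakeup(direction=90)  # interrupts whatever is currently playing
pixels.think()               # queued; starts as soon as wakeup yields
pixels.off()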
Example No. 57
class Downloader:
    def __init__(self, dir_path):
        self.arg_list = ["rtmpdump", "-q"]
        self.counter = 0
        self.totalfiles = 0
        self.queue = Queue()
        self.dir_path = dir_path
        self.numthreads = 8

        self.setTotalNumFiles()
        self.queueUp()
        self.launchThreads()
        self.queue.join()
        self.mergeAllDirs()

    # generator: walks the download dirs and yields (rec_id, file_prefix, dirname)
    def walkRootDir(self):
        for dirname in os.listdir(self.dir_path):
            # skip hidden files
            if not dirname[0].isalnum():
                continue

            to_dir_path = os.path.join(self.dir_path, dirname)
            for filename in os.listdir(to_dir_path):
                file_path = os.path.join(to_dir_path, filename)
                file_prefix, ext = os.path.splitext(filename)

                if os.path.isdir(file_path) or ext.strip() != ".html":
                    continue
                with open(file_path, 'r') as html_file:
                    soup = BeautifulSoup(html_file.read(), "html.parser")
                    flash_pattern = re.compile(
                        r'var\s*flashvars\s*\=\s*(((.|\n))+?\})\;')
                    flashvars = soup.find("script", text=flash_pattern)
                    if flashvars:
                        matching_id = flash_pattern.search(flashvars.text)
                        if matching_id:
                            stripped = re.search(r's:\s*(\"\d+\")\s*\,',
                                                 matching_id.group(1))
                            if stripped:
                                rec_id = stripped.group(1).replace('"', "")
                                yield (rec_id, file_prefix, dirname)
                            else:
                                print "FILE INVALID: " + filename
                                self.totalfiles -= 1
                        else:
                            print "FILE INVALID: " + filename
                            self.totalfiles -= 1
                    else:
                        print "FILE INVALID: " + filename
                        self.totalfiles -= 1

    def download(self, address, out_filename, dirname):
        out_filename = out_filename + ".flv"
        try:
            subprocess.check_call(self.arg_list + ["-r", address] +
                                  ["-o", out_filename])
            self.counter += 1
            sys.stdout.write("Downloaded {0}  -> {1}/{2}\n".format(
                out_filename, self.counter, self.totalfiles))
            sys.stdout.flush()
        except subprocess.CalledProcessError:
            sys.stdout.write("UNABLE TO DOWNLOAD: {0}\n".format(out_filename))
            sys.stdout.flush()
        else:
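
The snippet breaks off at the else: branch, and queueUp/launchThreads are not shown. A sketch of how they plausibly fit together, with task_done() calls that let the queue.join() in __init__ return (buildAddress is a hypothetical helper turning a recording id into an rtmp URL):

    def launchThreads(self):
        # hypothetical plumbing, not from the original
        for _ in range(self.numthreads):
            t = threading.Thread(target=self.worker)
            t.daemon = True
            t.start()

    def worker(self):
        while True:
            rec_id, file_prefix, dirname = self.queue.get()
            try:
                self.download(self.buildAddress(rec_id), file_prefix, dirname)
            finally:
                self.queue.task_done()  # lets queue.join() return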
Example No. 58
class ImageWatcher(Module):
    """Watch for new images and write them to all given destinations.

    Watches a path for new images and stores them in all given destinations. The
    source file is deleted only once every destination has been written successfully.
    """
    def __init__(self,
                 watchpath: str = None,
                 destinations: list = None,
                 *args,
                 **kwargs):
        """Create a new image watcher.

        Args:
            watchpath: Path to watch.
            destinations: Filename patterns for destinations.
        """
        Module.__init__(self, *args, **kwargs)

        # test import
        import pyinotify

        # add thread func
        self._add_thread_func(self._worker, True)

        # variables
        self._watchpath = watchpath
        self._notifier = None
        self._queue = Queue()

        # filename patterns
        if not destinations:
            raise ValueError(
                'No filename patterns given for the destinations.')
        self._destinations = destinations

    def open(self):
        """Open module."""
        Module.open(self)
        import pyinotify

        class EventHandler(pyinotify.ProcessEvent):
            """Event handler for file watcher."""
            def __init__(self, main, *args, **kwargs):
                """Create event handler."""
                pyinotify.ProcessEvent.__init__(self, *args, **kwargs)
                self.main = main

            def process_IN_CLOSE_WRITE(self, event):
                """React to IN_CLOSE_WRITE events."""
                self.main.add_image(event.pathname)

        # start watching directory
        if self._watchpath:
            log.info('Start watching directory %s for changes...',
                     self._watchpath)
            wm = pyinotify.WatchManager()
            wm.add_watch(self._watchpath, pyinotify.IN_CLOSE_WRITE)
            self._notifier = pyinotify.ThreadedNotifier(
                wm, default_proc_fun=EventHandler(self))  #, name='observer')
            self._notifier.start()

    def close(self):
        """Close image watcher."""
        Module.close(self)

        # stop watching
        if self._notifier:
            log.info('Stop watching directory...')
            self._notifier.stop()

    def add_image(self, filename: str):
        """Add an image to the image database.

        Args:
            filename (str): Local filename of new image.
        """

        # log file
        log.info('Adding new image %s...', filename)
        self._queue.put(filename)

    def _clear_queue(self):
        """Clear the queue with new files."""

        # clear queue
        with self._queue.mutex:
            self._queue.queue.clear()

    def _worker(self):
        """Worker thread."""

        # first, add all files from directory to queue
        self._clear_queue()
        for filename in sorted(glob.glob(os.path.join(self._watchpath, '*'))):
            self.add_image(filename)

        # run forever
        while not self.closing.is_set():
            # get next filename
            if self._queue.empty():
                self.closing.wait(1)
                continue
            filename = self._queue.get()
            log.info('Working on file %s...', filename)

            # better safe than sorry
            try:
                # open file
                fits_file = fits.open(filename)

                # loop archive and upload
                success = True
                for pattern in self._destinations:

                    # create filename
                    out_filename = format_filename(fits_file['SCI'].header,
                                                   pattern)

                    # store it
                    log.info('Storing file as %s...', out_filename)
                    try:
                        with self.vfs.open_file(out_filename, 'w') as dest:
                            fits_file.writeto(dest)
                    except Exception:
                        log.exception(
                            'Error while copying file, skipping for now.')
                        success = False

                # if any destination failed, keep the file in the watch directory
                if not success:
                    continue

                # close and delete files
                log.info('Removing file from watch directory...')
                os.remove(filename)

            except Exception:
                log.exception('Something went wrong.')
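
The loop above polls _queue.empty() and sleeps via closing.wait(1) so it stays responsive to shutdown; an equivalent body uses a blocking get with a timeout (a sketch of the same method body; Empty comes from the queue module):

while not self.closing.is_set():
    try:
        filename = self._queue.get(timeout=1)  # wait up to 1s for a file
    except Empty:
        continue  # nothing queued; recheck the closing flag
    ...  # process filename exactly as above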
Example No. 59
class GitCommitBearTest(unittest.TestCase):
    @staticmethod
    def run_git_command(*args, stdin=None):
        run_shell_command(' '.join(('git', ) + args), stdin)

    @staticmethod
    def git_commit(msg):
        # Use stdin mode from git, since -m on Windows cmd does not support
        # multiline messages.
        GitCommitBearTest.run_git_command('commit',
                                          '--allow-empty',
                                          '--allow-empty-message',
                                          '--file=-',
                                          stdin=msg)

    def run_uut(self, *args, **kwargs):
        """
        Runs the unit-under-test (via `self.uut.run()`) and collects the
        messages of the yielded results as a list.

        :param args:   Positional arguments to forward to the run function.
        :param kwargs: Keyword arguments to forward to the run function.
        :return:       A list of the message strings.
        """
        return list(result.message for result in self.uut.run(*args, **kwargs))

    def assert_no_msgs(self):
        """
        Assert that the bear's message queue is empty, and show the queued
        messages in the failure message if it is not.
        """
        self.assertTrue(
            self.msg_queue.empty(),
            'Expected no messages in bear message queue, but got: ' +
            str(list(str(i) for i in self.msg_queue.queue)))

    def setUp(self):
        self.msg_queue = Queue()
        self.section = Section('')
        self.uut = GitCommitBear(None, self.section, self.msg_queue)

        self._old_cwd = os.getcwd()
        self.gitdir = mkdtemp()
        os.chdir(self.gitdir)
        self.run_git_command('init')
        self.run_git_command('config', 'user.email coala@coala.io')
        self.run_git_command('config', 'user.name coala')

    @staticmethod
    def _windows_rmtree_remove_readonly(func, path, excinfo):
        os.chmod(path, stat.S_IWRITE)
        func(path)

    def tearDown(self):
        os.chdir(self._old_cwd)
        if platform.system() == 'Windows':
            onerror = self._windows_rmtree_remove_readonly
        else:
            onerror = None
        shutil.rmtree(self.gitdir, onerror=onerror)

    def test_git_failure(self):
        # A freshly initialized repository has no commits, so running the
        # bear here makes git fail while trying to log HEAD.
        self.assertEqual(self.run_uut(), [])

        git_error = self.msg_queue.get().message
        self.assertEqual(git_error[:4], 'git:')

        self.assert_no_msgs()

    def test_empty_message(self):
        self.git_commit('')

        self.assertEqual(self.run_uut(), ['HEAD commit has no message.'])
        self.assert_no_msgs()

        self.assertEqual(self.run_uut(allow_empty_commit_message=True), [])
        self.assert_no_msgs()

    def test_shortlog_checks_length(self):
        self.git_commit('Commit messages that nearly exceed default limit..')

        self.assertEqual(self.run_uut(), [])
        self.assert_no_msgs()

        self.assertEqual(self.run_uut(shortlog_length=17), [
            'Shortlog of the HEAD commit contains 50 '
            'character(s). This is 33 character(s) longer than '
            'the limit (50 > 17).'
        ])
        self.assert_no_msgs()

        self.git_commit('Add a very long shortlog for a bad project history.')
        self.assertEqual(self.run_uut(), [
            'Shortlog of the HEAD commit contains 51 '
            'character(s). This is 1 character(s) longer than '
            'the limit (51 > 50).'
        ])
        self.assert_no_msgs()

    def test_shortlog_checks_shortlog_trailing_period(self):
        self.git_commit('Shortlog with dot.')
        self.assertEqual(self.run_uut(shortlog_trailing_period=True), [])
        self.assertEqual(self.run_uut(shortlog_trailing_period=False),
                         ['Shortlog of HEAD commit contains a period at end.'])
        self.assertEqual(self.run_uut(shortlog_trailing_period=None), [])

        self.git_commit('Shortlog without dot')
        self.assertEqual(
            self.run_uut(shortlog_trailing_period=True),
            ['Shortlog of HEAD commit contains no period at end.'])
        self.assertEqual(self.run_uut(shortlog_trailing_period=False), [])
        self.assertEqual(self.run_uut(shortlog_trailing_period=None), [])

    def test_shortlog_wip_check(self):
        self.git_commit('[wip] Shortlog')
        self.assertEqual(self.run_uut(shortlog_wip_check=False), [])
        self.assertEqual(self.run_uut(shortlog_wip_check=True), [
            'This commit seems to be marked as work in progress '
            'and should not be used in production. Treat '
            'carefully.'
        ])
        self.assertEqual(self.run_uut(shortlog_wip_check=None), [])
        self.git_commit('Shortlog as usual')
        self.assertEqual(self.run_uut(shortlog_wip_check=True), [])

    def test_shortlog_checks_imperative(self):
        self.git_commit('tag: Add shortlog in imperative')
        self.assertNotIn(
            "Shortlog of HEAD commit isn't in imperative "
            "mood! Bad words are 'added'", self.run_uut())
        self.git_commit('Added invalid shortlog')
        self.assertIn(
            "Shortlog of HEAD commit isn't in imperative "
            "mood! Bad words are 'Added'", self.run_uut())
        self.git_commit('Adding another invalid shortlog')
        self.assertIn(
            "Shortlog of HEAD commit isn't in imperative "
            "mood! Bad words are 'Adding'", self.run_uut())
        self.git_commit('Added another invalid shortlog')
        self.assertNotIn(
            "Shortlog of HEAD commit isn't in imperative "
            "mood! Bad words are 'Added'",
            self.run_uut(shortlog_imperative_check=False))

    def test_shortlog_checks_regex(self):
        pattern = '.*?: .*[^.]'

        self.git_commit('tag: message')
        self.assertEqual(self.run_uut(shortlog_regex=pattern), [])

        self.git_commit('tag: message invalid.')
        self.assertEqual(self.run_uut(shortlog_regex=pattern), [
            'Shortlog of HEAD commit does not match given regex: {regex}'.
            format(regex=pattern)
        ])

        self.git_commit('SuCkS cOmPleTely')
        self.assertEqual(self.run_uut(shortlog_regex=pattern), [
            'Shortlog of HEAD commit does not match given regex: {regex}'.
            format(regex=pattern)
        ])
        # Check for full-matching.
        pattern = 'abcdefg'

        self.git_commit('abcdefg')
        self.assertEqual(self.run_uut(shortlog_regex=pattern), [])

        self.git_commit('abcdefgNO MATCH')
        self.assertEqual(self.run_uut(shortlog_regex=pattern), [
            'Shortlog of HEAD commit does not match given regex: {regex}'.
            format(regex=pattern)
        ])

    def test_body_checks(self):
        self.git_commit(
            'Commits message with a body\n\n'
            'nearly exceeding the default length of a body, but not quite. '
            'haaaaaands')

        self.assertEqual(self.run_uut(), [])
        self.assert_no_msgs()

        self.git_commit('Shortlog only')

        self.assertEqual(self.run_uut(), [])
        self.assert_no_msgs()

        # Force a body.
        self.git_commit('Shortlog only ...')
        self.assertEqual(self.run_uut(force_body=True),
                         ['No commit message body at HEAD.'])
        self.assert_no_msgs()

        # Miss a newline between shortlog and body.
        self.git_commit('Shortlog\nOops, body too early')
        self.assertEqual(self.run_uut(), [
            'No newline found between shortlog and body at '
            'HEAD commit. Please add one.'
        ])
        self.assert_no_msgs()

        # And now too long lines.
        self.git_commit('Shortlog\n\n'
                        'This line is ok.\n'
                        'This line is by far too long (in this case).\n'
                        'This one too, blablablablablablablablabla.')
        self.assertEqual(self.run_uut(body_line_length=41), [
            'Body of HEAD commit contains too long lines. '
            'Commit body lines should not exceed 41 '
            'characters.'
        ])
        self.assert_no_msgs()

        # Allow long lines with ignore regex
        self.git_commit('Shortlog\n\n'
                        'This line is ok.\n'
                        'This line is by far too long (in this case).')
        self.assertEqual(
            self.run_uut(body_line_length=41,
                         ignore_length_regex=('^.*too long', )), [])
        self.assertTrue(self.msg_queue.empty())

        # body_regex, not fully matched
        self.git_commit('Shortlog\n\n'
                        'First line, blablablablablabla.\n'
                        'Another line, blablablablablabla.\n'
                        'Fix 1112')
        self.assertEqual(self.run_uut(body_regex=r'Fix\s+[1-9][0-9]*\s*'), [
            'No match found in commit message for the regular '
            r'expression provided: Fix\s+[1-9][0-9]*\s*'
        ])
        self.assert_no_msgs()

        # Matching with regexp, fully matched
        self.git_commit('Shortlog\n\n' 'TICKER\n' 'CLOSE 2017')
        self.assertEqual(
            self.run_uut(body_regex=r'TICKER\s*CLOSE\s+[1-9][0-9]*'), [])
        self.assert_no_msgs()

    def test_check_issue_reference(self):
        # Commit with no remotes configured
        self.git_commit('Shortlog\n\n'
                        'First line, blablablablablabla.\n'
                        'Another line, blablablablablabla.\n'
                        'Closes #01112')
        self.assertEqual(self.run_uut(body_close_issue=True), [])

        # Adding BitBucket remote for testing
        self.run_git_command('remote', 'add', 'test',
                             'https://bitbucket.com/user/repo.git')

        self.git_commit('Shortlog\n\n'
                        'First line, blablablablablabla.\n'
                        'Another line, blablablablablabla.\n'
                        'Closes #1112')
        self.assertEqual(
            self.run_uut(body_close_issue=True,
                         body_close_issue_full_url=True),
            ['Host bitbucket does not support full issue '
             'reference.'])

        self.git_commit('Shortlog\n\n'
                        'First line, blablablablablabla.\n'
                        'Another line, blablablablablabla.\n'
                        'Closes #1112')
        self.assertEqual(self.run_uut(body_close_issue=True), [])

        self.git_commit('Shortlog\n\n'
                        'First line, blablablablablabla.\n'
                        'Another line, blablablablablabla.\n'
                        'Resolves https://bitbucket.org/user/repo/issues/1/')
        self.assertEqual(self.run_uut(body_close_issue=True), [
            'Invalid issue reference: '
            'https://bitbucket.org/user/repo/issues/1/'
        ])

        self.git_commit('Shortlog\n\n'
                        'First line, blablablablablabla.\n'
                        'Another line, blablablablablabla.\n'
                        'Resolves https://bitbucket.org/user/repo/issues/1/')
        self.assertEqual(
            self.run_uut(body_close_issue=True,
                         body_close_issue_full_url=True),
            ['Host bitbucket does not support full issue '
             'reference.'])

        # Adding BitBucket's ssh remote for testing
        self.run_git_command('remote', 'set-url', 'test',
                             '[email protected]:user/repo.git')

        self.git_commit('Shortlog\n\n'
                        'First line, blablablablablabla.\n'
                        'Another line, blablablablablabla.\n'
                        'Closes #1112')
        self.assertEqual(
            self.run_uut(body_close_issue=True,
                         body_close_issue_full_url=True),
            ['Host bitbucket does not support full issue '
             'reference.'])

        self.git_commit('Shortlog\n\n'
                        'First line, blablablablablabla.\n'
                        'Another line, blablablablablabla.\n'
                        'Closes #1112')
        self.assertEqual(self.run_uut(body_close_issue=True), [])

        self.git_commit('Shortlog\n\n'
                        'First line, blablablablablabla.\n'
                        'Another line, blablablablablabla.\n'
                        'Fix issue #1112')
        self.assertEqual(
            self.run_uut(body_close_issue=True,
                         body_enforce_issue_reference=True), [])

        self.git_commit('Shortlog\n\n'
                        'First line, blablablablablabla.\n'
                        'Another line, blablablablablabla.\n'
                        'Resolving    bug#1112')
        self.assertEqual(
            self.run_uut(body_close_issue=True,
                         body_enforce_issue_reference=True),
            ['Invalid issue reference: bug#1112'])

        self.git_commit('Shortlog\n\n'
                        'First line, blablablablablabla.\n'
                        'Another line, blablablablablabla.\n'
                        'Fixed randomkeyword#1112')
        self.assertEqual(
            self.run_uut(body_close_issue=True,
                         body_enforce_issue_reference=True),
            ['Invalid issue reference: randomkeyword#1112'])

        self.git_commit('Shortlog\n\n'
                        'First line, blablablablablabla.\n'
                        'Another line, blablablablablabla.\n'
                        'Closes#1112')
        self.assertEqual(
            self.run_uut(body_close_issue=True,
                         body_enforce_issue_reference=True),
            ['Body of HEAD commit does not contain any '
             'issue reference.'])

        self.git_commit('Shortlog\n\n'
                        'First line, blablablablablabla.\n'
                        'Another line, blablablablablabla.\n'
                        'Closes bug bug#1112')
        self.assertEqual(
            self.run_uut(body_close_issue=True,
                         body_enforce_issue_reference=True),
            ['Invalid issue reference: bug'])

        self.git_commit('Shortlog\n\n'
                        'First line, blablablablablabla.\n'
                        'Another line, blablablablablabla.\n'
                        'Closesticket #1112')
        self.assertEqual(
            self.run_uut(body_close_issue=True,
                         body_enforce_issue_reference=True),
            ['Body of HEAD commit does not contain any '
             'issue reference.'])

        self.git_commit('Shortlog\n\n'
                        'First line, blablablablablabla.\n'
                        'Another line, blablablablablabla.\n'
                        'Resolves https://bitbucket.org/user/repo/issues/1/')
        self.assertEqual(self.run_uut(body_close_issue=True), [
            'Invalid issue reference: '
            'https://bitbucket.org/user/repo/issues/1/'
        ])

        self.git_commit('Shortlog\n\n'
                        'First line, blablablablablabla.\n'
                        'Another line, blablablablablabla.\n'
                        'Resolves https://bitbucket.org/user/repo/issues/1/')
        self.assertEqual(
            self.run_uut(body_close_issue=True,
                         body_close_issue_full_url=True),
            ['Host bitbucket does not support full issue '
             'reference.'])

        # Adding GitHub remote for testing, ssh way :P
        self.run_git_command('remote', 'set-url', 'test',
                             '[email protected]:user/repo.git')

        # GitHub host with an issue
        self.git_commit('Shortlog\n\n'
                        'First line, blablablablablabla.\n'
                        'Another line, blablablablablabla.\n'
                        'Fixed https://github.com/usr/repo/issues/1112\n'
                        'and https://github.com/usr/repo/issues/1312')
        self.assertEqual(
            self.run_uut(body_close_issue=True,
                         body_close_issue_full_url=True), [])

        # No keywords and no issues
        self.git_commit('Shortlog\n\n'
                        'This line is ok.\n'
                        'This line is by far too long (in this case).\n'
                        'This one too, blablablablablablablablabla.')
        self.assertEqual(
            self.run_uut(body_close_issue=True,
                         body_close_issue_full_url=True,
                         body_close_issue_on_last_line=True), [])
        self.assert_no_msgs()

        # No keywords, no issues, no body
        self.git_commit('Shortlog only')
        self.assertEqual(
            self.run_uut(body_close_issue=True,
                         body_close_issue_on_last_line=True), [])
        self.assert_no_msgs()

        # Has keyword but no valid issue URL
        self.git_commit('Shortlog\n\n'
                        'First line, blablablablablabla.\n'
                        'Another line, blablablablablabla.\n'
                        'Fix https://github.com/user/repo.git')
        self.assertEqual(
            self.run_uut(body_close_issue=True,
                         body_close_issue_full_url=True), [
                             'Invalid full issue reference: '
                             'https://github.com/user/repo.git'
                         ])
        self.assert_no_msgs()

        # GitHub host with short issue tag
        self.git_commit('Shortlog\n\n'
                        'First line, blablablablablabla.\n'
                        'Another line, blablablablablabla.\n'
                        'Fix #1112, #1115 and #123')
        self.assertEqual(self.run_uut(body_close_issue=True), [])

        # GitHub host with invalid short issue tag
        self.git_commit('Shortlog\n\n'
                        'First line, blablablablablabla.\n'
                        'Another line, blablablablablabla.\n'
                        'Fix #01112 and #111')
        self.assertEqual(self.run_uut(body_close_issue=True),
                         ['Invalid issue number: #01112'])
        self.assert_no_msgs()

        # GitHub host with no full issue reference
        self.git_commit('Shortlog\n\n'
                        'First line, blablablablablabla.\n'
                        'Another line, blablablablablabla.\n'
                        'Fix #1112')
        self.assertEqual(
            self.run_uut(body_close_issue=True,
                         body_close_issue_full_url=True),
            ['Invalid full issue reference: #1112'])
        self.assert_no_msgs()

        # Invalid characters in issue number
        self.git_commit('Shortlog\n\n'
                        'First line, blablablablablabla.\n'
                        'Another line, blablablablablabla.\n'
                        'Fix #1112-3')
        self.assertEqual(
            self.run_uut(body_close_issue=True,
                         body_close_issue_full_url=True),
            ['Invalid full issue reference: #1112-3'])
        self.assert_no_msgs()

        # Adding GitLab remote for testing
        self.run_git_command('remote', 'set-url', 'test',
                             'https://gitlab.com/user/repo.git')

        # GitLab chosen and has an issue
        self.git_commit('Shortlog\n\n'
                        'First line, blablablablablabla.\n'
                        'Another line, blablablablablabla.\n'
                        'Resolve https://gitlab.com/usr/repo/issues/1112\n'
                        'and https://gitlab.com/usr/repo/issues/1312')
        self.assertEqual(
            self.run_uut(body_close_issue=True,
                         body_close_issue_full_url=True), [])

        # Invalid issue number in URL
        self.git_commit('Shortlog\n\n'
                        'First line, blablablablablabla.\n'
                        'Another line, blablablablablabla.\n'
                        'Closing https://gitlab.com/user/repo/issues/123\n'
                        'and https://gitlab.com/user/repo/issues/not_num')
        self.assertEqual(
            self.run_uut(body_close_issue=True,
                         body_close_issue_full_url=True), [
                             'Invalid full issue reference: '
                             'https://gitlab.com/user/repo/issues/not_num'
                         ])
        self.assert_no_msgs()

        # Invalid URL
        self.git_commit('Shortlog\n\n'
                        'First line, blablablablablabla.\n'
                        'Another line, blablablablablabla.\n'
                        'Fix www.google.com/issues/hehehe')
        self.assertEqual(
            self.run_uut(body_close_issue=True,
                         body_close_issue_full_url=True),
            ['Invalid full issue reference: '
             'www.google.com/issues/hehehe'])
        self.assert_no_msgs()

        # One of the short references is broken
        self.git_commit('Shortlog\n\n'
                        'First line, blablablablablabla.\n'
                        'Another line, blablablablablabla.\n'
                        'Resolve #11 and close #notnum')
        self.assertEqual(self.run_uut(body_close_issue=True),
                         ['Invalid issue reference: #notnum'])
        self.assert_no_msgs()

        # Close issues in other repos
        self.git_commit('Shortlog\n\n'
                        'First line, blablablablablabla.\n'
                        'Another line, blablablablablabla.\n'
                        'Resolve #11 and close github/gitter#32')
        self.assertEqual(self.run_uut(body_close_issue=True), [])
        self.assert_no_msgs()

        # Incorrect close issue other repo pattern
        self.git_commit('Shortlog\n\n'
                        'First line, blablablablablabla.\n'
                        'Another line, blablablablablabla.\n'
                        'Fix #11 and close github#32')
        self.assertEqual(self.run_uut(body_close_issue=True),
                         ['Invalid issue reference: github#32'])
        self.assert_no_msgs()

        # Last line enforce full URL
        self.git_commit('Shortlog\n\n'
                        'First line, blablablablablabla.\n'
                        'Fix http://gitlab.com/user/repo/issues/1112\n'
                        'Another line, blablablablablabla.\n')
        self.assertEqual(
            self.run_uut(body_close_issue=True,
                         body_close_issue_full_url=True,
                         body_close_issue_on_last_line=True,
                         body_enforce_issue_reference=True),
            [
                'Body of HEAD commit does not contain any full issue'
                ' reference in the last line.'
            ])
        self.assert_no_msgs()

    def test_different_path(self):
        no_git_dir = mkdtemp()
        self.git_commit('Add a very long shortlog for a bad project history.')
        os.chdir(no_git_dir)
        # When section doesn't have a project_dir
        self.assertEqual(self.run_uut(), [])
        git_error = self.msg_queue.get().message
        self.assertEqual(git_error[:4], 'git:')
        # When the section does have a project_dir
        self.section.append(Setting('project_dir', escape(self.gitdir, '\\')))
        self.assertEqual(self.run_uut(), [
            'Shortlog of the HEAD commit contains 51 '
            'character(s). This is 1 character(s) longer than '
            'the limit (51 > 50).'
        ])
        self.assertEqual(get_config_directory(self.section), self.gitdir)
        os.chdir(self.gitdir)
        os.rmdir(no_git_dir)
Example No. 60
from queue import Queue
# basic usage
q = Queue()
q.put(url)  # url is assumed to be defined elsewhere
q.get()  # blocks while the queue is empty
q.get(block=True, timeout=3)  # raises queue.Empty after 3 seconds
q.get(block=False)  # raises queue.Empty immediately if the queue is empty
q.empty()  # True if the queue is currently empty
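
A minimal runnable producer/consumer sketch tying the calls above together (the URLs are placeholders):

from queue import Queue
from threading import Thread

def worker(q):
    while True:
        item = q.get()    # blocks until an item arrives
        if item is None:  # sentinel: shut the worker down
            q.task_done()
            break
        print('processing', item)
        q.task_done()

q = Queue()
t = Thread(target=worker, args=(q,))
t.start()
for url in ('http://a.example', 'http://b.example'):
    q.put(url)
q.put(None)
q.join()  # returns once every queued item was marked done
t.join()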