Example #1
	def start(self):
		if not self.pid:
			classPath = os.path.join(sourcePath, self.__deployDir, 'lib/*')
			# The following command never returns a result, so it must run on a separate thread
			demoThread = Thread(target=executeCmdAndReturn,
			                    args=(['nohup java -jar ' + self.__targetJar + ' -classpath ' + classPath + ' &'],),
			                    kwargs={'cwd': os.path.join(sourcePath, self.__deployDir), 'shell': True, 'log': True})
			demoThread.setDaemon(True)
			demoThread.setName('io-all starter thread')
			demoThread.start()
			count = 0
			while not count:
				record('.', terminator='')
				sleep(1)
				if self.__serverPort and self.__serverProtocol == 'dubbo':
					returnCode, out, _ = executeCmdAndReturn(
						['echo status | nc -i 1 127.0.0.1 ' + self.__serverPort + ' | grep -c OK'],
						shell=True)
					if returnCode == 0:
						count = int(out)
				else:
					if self.isStarted():
						count = 1

				if count > 0:
					break
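Note on the API used throughout these examples: setName/getName/setDaemon/isDaemon are soft-deprecated aliases (since Python 3.10) for the name and daemon attributes, and isAlive() was removed in Python 3.9 in favour of is_alive(). A minimal sketch of the same start-a-named-daemon-thread pattern in the modern spelling (some_callable is a stand-in for any target):

from threading import Thread

# name and daemon can be passed straight to the constructor
t = Thread(target=some_callable, name='io-all starter thread', daemon=True)
t.start()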
Example #2
class TimeHandler:
    def __init__(self, name, interval, get_msg=None):
        self.name = name
        self.interval = interval
        self.get_msg = get_msg
        self._active = False

    def run(self, interval):
        while self._active:
            try:
                msg = self.get_msg() if self.get_msg else None
                self.handle(msg)
            except Exception as e:
                logger.exception(f'<TimeHandler>-{self.name} exception:{e}')
            finally:
                time.sleep(interval)

    def stop(self):
        self._active = False

    def start(self):
        self._active = True  # without this, the run() loop would exit immediately
        self.timer = Thread(target=self.run, args=(self.interval, ))
        self.timer.setName(self.name)
        self.timer.setDaemon(True)
        self.timer.start()

    @abstractmethod
    def handle(self, msg):
        ...
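A minimal usage sketch for the class above, assuming its Thread/time/abstractmethod imports are in place and that logger is a standard logging logger; HeartbeatHandler and its messages are hypothetical:

import logging
import time

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class HeartbeatHandler(TimeHandler):
    def handle(self, msg):
        logger.info('%s handled: %s', self.name, msg)

h = HeartbeatHandler('heartbeat', interval=1, get_msg=lambda: 'ping')
h.start()        # daemon thread named 'heartbeat' runs the polling loop
time.sleep(3.5)  # let a few ticks through
h.stop()         # the loop exits after its current sleep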
Example #3
    def start(self):
        # doing some initialisation
        if self.master:
            self.root.hash_to_ignore = HashToIgnore()
            self.root.update_hash = set()
            self.root.update_hash_lock = Lock()
            self.root.bad_info_hash = {}
            self.root.good_info_hash = {}
            self.root.last_update_hash = 0
        self.hash_to_fetch = collections.OrderedDict()
        self.hash_to_fetch_tried = collections.defaultdict(set)
        self.hash_to_fetch_totry = collections.defaultdict(set)

        # calling parent method
        super(Crawler, self).start()

        # starting threads
        for f, name in [(self._client_loop, 'client_loop')]:
            t = Thread(target=f)
            t.setName("%s:%s" % (self.prefix, name))
            t.daemon = True
            t.start()
            self._threads.append(t)
            self.threads.append(t)
        if self.master:
            # adding threads to the parent threads list
            self.root.client.start()
            self._threads.extend(self.root.client.threads)
            self.threads.extend(self.root.client.threads)
Example #4
def start_tcp_server(s_util, event_hand):

    print("Starting TCP server ...")
    print("Server parameters:")
    print("server_ID: {}".format(tcp_settings.ID))
    print("TCP_EnablePinging: {}".format(tcp_settings.pinging))
    print("TCP_pingMissCutout: {}".format(tcp_settings.pingMissCutout))
    print("pingMaxTime: {}".format(tcp_settings.pingMaxTime))
    print("pingRate: {}".format(tcp_settings.pingRate))

    server = TCPServer.ThreadedTCPServer(HOST, PORT,
                                         TCPServer.ThreadedTCPRequestHandler,
                                         tcp_settings, s_util, event_hand)
    ip, port = server.server_address

    # Start a thread with the server -- that thread will then start one
    # more thread for each request
    server_thread = Thread(target=server.serve_forever)
    server_thread.setName("TCP_ServerThread")

    # Exit the server thread when the main thread terminates
    server_thread.daemon = True
    server_thread.start()

    print("TCP server running in thread:", server_thread.name)
    print("ip: ", ip, " port: ", port, "\n")

    return server, server_thread
Example #5
def spawn_threads(mongo_res):
    try:
        start = int(datetime.now().timestamp() * 1000 -
                    float(Config.SLIDING_WINDOW) * 1000)
        end = int(datetime.now().timestamp() * 1000)
        interval_list = generate_time_intervals(
            start,
            int(Config.SLIDING_WINDOW_PIECE) * 1000, end)
        data_list1 = []
        for lst in interval_list:
            data = get_data_in_interval(mongo_res, lst[0], lst[1])
            data_list1.append(get_result_table(data))
        print("Interval list: ", interval_list)
        print("Data list1: ", data_list1)

        var_thread = Thread(target=variance_method, args=(data_list1, ))
        var_thread.daemon = True
        var_thread.setName("Variance Method thread")

        slide_thread = Thread(target=sliding_window, args=(data_list1, ))
        slide_thread.daemon = True
        slide_thread.setName("Sliding Window thread")

        var_thread.start()
        slide_thread.start()
        print("Threads started :" + var_thread.getName() + ", " +
              slide_thread.getName())
    except BaseException as e:
        print("Error in Spawn threads")
        print(e)
Example #6
def data_hander_and_save(html):
    # normalize the raw html string into the format we need
    new_html = _html_hander(html)
    videologger.info("transformed the html text successfully")
    # get the elements to extract data from via XPath
    elements = _get_element(new_html)
    videologger.info("got elements for xpath successfully")
    # get data dict
    _get_data(elements)
    # start three threads to check whether the data is already saved and save the new data
    thread_list = []
    for i in range(3):
        T = Thread(target=_check_finger_and_save_data)
        # set the thread name
        T.setName("videoDataHander{}".format(i))
        thread_list.append(T)
    for t in thread_list:
        # make each thread a daemon of the main thread,
        # then start it
        t.setDaemon(True)
        t.start()
        videologger.info(
            "thread named {} started to check and save data".format(
                t.getName()))
    # join the queue so the main thread won't exit until the queue is empty
    videoDataQueue.join()
    if videoDataQueue.empty():
        videologger.info("video data queue is empty, the spiders will stop")
Example #7
 def download(self):
     if DEBUG:
         print "http-sdownload: download()"
     self.cancelled = False
     if self.downloader.picker.am_I_complete():
         self.downloader.downloads.remove(self)
         return
     self.index = self.downloader.picker.next(haveall, self._want, self)
     if self.index is None:
         self.resched(0.01)
         return
     if (self.index is None and not self.endflag
             and not self.downloader.peerdownloader.has_downloaders()):
         self.endflag = True
         self.index = self.downloader.picker.next(haveall, self._want, self)
     if self.index is None:
         self.endflag = True
         self.resched()
     else:
         self.url = (self.seedurl + '&piece=' + str(self.index))
         self._get_requests()
         if self.request_size < self.downloader.storage._piecelen(
                 self.index):
             self.url += '&ranges=' + self._request_ranges()
         rq = Thread(target=self._request)
         rq.setName("HoffmanHTTPDownloader" + rq.getName())
         rq.setDaemon(True)
         rq.start()
         self.active = True
Example #8
 def deadlocks(f, t):
     th = Thread(target=f)
     th.setName("Thread")
     th.setDaemon(1)
     th.start()
     th.join(t)
     return th.isAlive()
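A hypothetical use of this helper, with the Python 2-era thread API as defined above: a target that blocks forever is still alive after the t-second join, so deadlocks() returns True, and the daemon flag lets the interpreter exit despite the stuck thread:

from threading import Lock

lock = Lock()
lock.acquire()
print(deadlocks(lock.acquire, 1.0))   # True: the second acquire blocks forever
print(deadlocks(lambda: None, 1.0))   # False: the target returns immediately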
Example #9
 def onOpenDest(self, event = None, openFile=False):
     # open Destination
     item = self.standardDetails.getData()
     state = item.get('ds')
     
     if state:
         dest = state.get_download().get_dest_dir()
         if openFile:
             destfiles = state.get_download().get_dest_files()
             if len(destfiles) == 1:
                 dest = destfiles[0][1]
         if sys.platform == 'darwin':
             dest = 'file://%s' % dest
         
         print >> sys.stderr, dest
         complete = True
         # check if destination exists
         assert dest is not None and os.access(dest, os.R_OK), 'Could not retrieve destination'
         try:
             t = Thread(target = open_new, args=(str(dest),))
             t.setName( "FilesOpenNew"+t.getName() )
             t.setDaemon(True)
             t.start()
         except:
             print_exc()

     elif DEBUG:
         print >>sys.stderr,'GUIUtil: onOpenFileDest failed: no torrent selected'
Example #10
    def run(self):
        print("VIDEO client starts...")
        while True:
            # retry the connection once a second until it succeeds
            try:
                self.sock.connect(self.ADDR)
                break
            except:
                time.sleep(1)
        print("VIDEO client connected...")

        # listen for camera-close requests in parallel
        cl = Thread(target=self.close_listener)
        cl.setName('Close listener')
        cl.start()

        while self.cap.isOpened():
            ret, frame = self.cap.read()
            if not ret:
                break
            # a rectangular window: the size of the frame the camera captures
            sframe = cv2.resize(frame, (0, 0), fx=self.fx, fy=self.fx)
            data = pickle.dumps(sframe)
            zdata = zlib.compress(data, zlib.Z_BEST_COMPRESSION)
            try:
                # compress the frame and send it to the server
                self.sock.sendall(struct.pack("L", len(zdata)) + zdata)
            except:
                break
            for i in range(self.interval):
                self.cap.read()
Example #11
class Scheduler:
    """
    Class that schedules the spiders to update the events in the events source.
    """
    @inject
    def __init__(self, events_source: EventsSource,
                 classroom_source: ClassroomSource,
                 settings_source: SettingsSource,
                 spiders_provider: SpiderFactory):
        self._status_subject = Subject()
        self._collection = events_source  # type: EventsSource
        self._classroom_source = classroom_source  # type: ClassroomSource
        self._spiders = spiders_provider.get_spiders(
        )  # type: List[BaseSpider]
        self._settings_source = settings_source  # type: SettingsSource
        self._stop_event = Event()
        self._thread = Thread(target=_scheduler_loop,
                              args=(events_source, classroom_source,
                                    self._spiders, settings_source,
                                    self._stop_event,
                                    self._status_subject))  # type: Thread
        self._thread.setName("BookingBot Scheduler")
        self._thread.daemon = True

    def start(self):
        self._thread.start()

    def stop(self):
        self._stop_event.set()

    def on_status_changed(self) -> Observable:
        return self._status_subject
Example #12
def fetch_data_to_local(discord_object: DiscordBot.DiscordBot):
    ss = 'oceaniaGuildActivity'
    spreadsheet = {ss: {'Oceania': {}}}
    thread_pool = []
    discord_object.spreadsheet_accessor.open_spreadsheet(
        ss,
        'https://docs.google.com/spreadsheets/d/1VWg6INme20CV9BJnhgCLYbtlwnC3xl41Opz8Tzq9Auo/edit#gid=0'
    )
    discord_object.spreadsheet_accessor.open_worksheet(ss, 'Oceania')
    arguments = [(ss, 'Oceania', 1, 1, False), (ss, 'Oceania', 2, 1, False),
                 (ss, 'Oceania', 4, 1, False), (ss, 'Oceania', 6, 1, False),
                 (ss, 'Oceania', 7, 1, False)]
    pointer = [['Oceania', 'Name'], ['Oceania', 'Capitalized'],
               ['Oceania', 'discordID'], ['Oceania', 'ezname'],
               ['Oceania', 'eznameUpper']]

    spreadsheet[ss]['Oceania']['lastpos'] = len(
        discord_object.spreadsheet_accessor.get_column_values(
            ss, 'Oceania', 1, 0, True))
    discord_object.locals[COMMAND] = spreadsheet
    for x, argument in enumerate(arguments):
        thread = Thread(target=load_data,
                        args=(discord_object, argument, pointer[x]))
        thread.setName(pointer[x][1])
        thread_pool.append(thread)
        thread.start()
    while len(thread_pool) > 0:
        removable = []
        for thread in thread_pool:
            if not thread.is_alive():
                removable.append(thread)
        for x in removable:
            print('thread ' + x.getName() + '  completed')
            thread_pool.pop(thread_pool.index(x))
Example #13
 def __setup__(self):
     clean_thread = Thread(target=self.cleaner)
     clean_thread.setDaemon(True)
     clean_thread.setName("clean temp db thread")
     clean_thread.start()
     self.active = True
     super().__setup__()
Example #14
class NonBlockingStreamReader:
    def __init__(self, stream):
        self._stream = stream
        self._queue = Queue()
        self._thread = None

    def start(self):
        def _worker():
            while True:
                line = self._stream.readline()
                if line:
                    self._queue.put(line)
                else:
                    raise RuntimeError("line is empty")

        self._thread = Thread(target=_worker)
        self._thread.setDaemon(True)
        self._thread.setName("NonBlockingStreamReader of %s" % repr(self._stream))
        self._thread.start()

    def readline(self, timeout=None):
        try:
            return self._queue.get(block=timeout is not None, timeout=timeout)
        except Empty:
            return None
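A hypothetical usage sketch, assuming the class above together with its Queue/Empty and Thread imports: wrap a child process's stdout so reads can time out instead of blocking (the ping command is just an example producer):

import subprocess

proc = subprocess.Popen(['ping', '-c', '3', '127.0.0.1'],
                        stdout=subprocess.PIPE, text=True)
reader = NonBlockingStreamReader(proc.stdout)
reader.start()
while proc.poll() is None:
    line = reader.readline(timeout=0.5)  # None if nothing arrived within 0.5s
    if line:
        print(line, end='')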
Example #15
def get_dir(db=None):
    if db is None:
        db = MySQLdb.connect(**config.mysql)
    files = os.listdir(config.torrents_dir)
    hashs = [h[:-8] for h in files if h.endswith(".torrent")]
    cur = db.cursor()
    cur.execute("SELECT hash FROM torrents WHERE created_at IS NULL AND (%s)"  % " OR ".join("hash=%s" for hash in hashs), tuple(hashs))
    ret = [r[0] for r in cur]
    new_hash = set(hashs).difference(ret)
    count = len(new_hash)
    new_hashq = queue.Queue()
    [new_hashq.put(h) for h in new_hash]
    pbar = progressbar.ProgressBar(widgets=widget("inserting torrents in db"), maxval=count).start()
    cur.close()
    try:
        threads = []
        for i in range(0, 20):
            t = Thread(target=insert, args=(new_hashq, pbar))
            t.setName("insert-%02d" % i)
            t.daemon = True
            t.start()
            threads.append(t)
        join(threads)
    finally:
        print("")
        db.commit()

    return ret + list(new_hash)
Example #16
 def start(self):
     for i in range(self.threadCount):
         t = Thread(target=self._consumeTask, args=(i,))
         t.setName('TaskExecutor worker %d' % (i,))
         t.setDaemon(True)  # finishes when non-daemon threads terminate
         t.start()
         self.threads.append(t)
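The worker body is not shown; a minimal sketch of what a _consumeTask loop could look like, assuming the executor feeds a self.tasks queue of callables, uses None as a stop sentinel, and has logging imported (all assumptions, not part of the snippet):

 def _consumeTask(self, workerId):
     # hypothetical worker loop: pull callables off the task queue until
     # a None sentinel arrives; exceptions must not kill the worker
     while True:
         task = self.tasks.get()
         try:
             if task is None:
                 break
             task()
         except Exception:
             logging.exception('TaskExecutor worker %d: task failed', workerId)
         finally:
             self.tasks.task_done()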
Example #17
    def __init__(self, options, q_out):
        FuzzQueue.__init__(self, q_out, options.get("max_concurrent") * 5)

        self.options = options

        self.processed = 0

        self.exit_job = False
        self.mutex_multi = Lock()
        self.mutex_stats = Lock()

        self.queue_out = q_out

        # Connection pool
        self.m = None
        self.freelist = Queue()
        self.create_pool(options.get("max_concurrent"))

        th2 = Thread(target=self.__read_multi_stack)
        th2.setName('__read_multi_stack')
        th2.start()

        self.pause = Event()
        self.pause.set()

        self._proxies = None
        if options.get("proxy_list"):
            self._proxies = self.__get_next_proxy(options.get("proxy_list"))
Example #18
    def rerequest(self, s, callback):
        # ProxyService_
        #
        doe_mode = self.config.get('doe_mode', 0)
        if doe_mode == DOE_MODE_PRIVATE:
            if DEBUG:
                print >> sys.stderr, "Rerequester: rerequest exited."  # + str(doe_mode)
            return
        else:
            if DEBUG:
                print >> sys.stderr, "Rerequester: rerequest did not exit"  # + str(doe_mode)
        #
        # _ProxyService

        if not self.lock.isfinished(
        ):  # still waiting for prior cycle to complete??

            def retry(self=self, s=s, callback=callback):
                self.rerequest(s, callback)

            self.sched(retry, 5)  # retry in 5 seconds
            return
        self.lock.reset()
        rq = Thread(target=self._rerequest, args=[s, callback])
        rq.setName("TrackerRerequestA" + rq.getName())
        # Arno: make this a daemon thread so the client closes sooner.
        rq.setDaemon(True)
        rq.start()
Example #19
 def init(self, callback=None, timeout=10):
     if callback is None:
         return self._init()
     t = Thread(target=self._init, args=[callback, timeout])
     t.setName('dd-downloader-init-' + t.getName())
     t.setDaemon(True)
     t.start()
Example #20
 def wrapper(*args, **kwargs):
     from threading import Thread
     t = Thread(target=function, args=args, kwargs=kwargs)
     name = "Thread - {} (args={}, kwargs={})".format(
         function.__name__, args, kwargs)
     t.setName(name)
     t.start()
Example #21
    def run(self):
        # one thread waits for 'AD' packets over TCP; the main thread sends and waits for 'SD' broadcasts
        t = Thread(target=self.hearTCP, args=())
        t.setName('TCP')
        t.start()  # wait for 'AD' packets

        self.hearUDP()  # send 'SD' in broadcast and wait for new 'SD'
Example #22
def on_online(event: MySignalEvent):
    global online_thread
    global stop_thread

    logger.info("Online mode enabled")

    stop_thread = False
    token = event.db.online_token
    if token is None:
        utils.new_message(
            event.api,
            event.chat.peer_id,
            message=
            f"❗ Token is not set.\n It can be set in the admin panel https://{event.db.host}"
        )
        return "ok"
    if online_thread is not None and online_thread.is_alive():
        utils.new_message(event.api,
                          event.chat.peer_id,
                          message="✅ Perpetual online is already running")
        return "ok"
    api_ = VkApi(token)
    online_thread = Thread(target=online_th, args=(api_, lambda: stop_thread))
    online_thread.setDaemon(True)
    online_thread.setName('Online Thread')
    online_thread.start()
    utils.new_message(event.api,
                      event.chat.peer_id,
                      message="✅ Perpetual online started")
    return "ok"
Example #25
def createThread():
    t = Thread(target=threadFunc)
    t.setName('T1')
    t.setDaemon(True)  # daemon: killed automatically when the main thread exits
    # t.run() would execute threadFunc in the calling thread instead
    t.start()  # start() spawns a new thread
    return t
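A quick sketch of the start()/run() distinction noted in the comments above, assuming threadFunc prints the current thread's name:

import threading

def threadFunc():
    print('running in', threading.current_thread().name)

t = createThread()  # start(): a new thread prints "running in T1"
t.join()
threading.Thread(target=threadFunc).run()  # run() alone executes in the caller: "running in MainThread"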
Example #26
    def __init__(self, ip, port, handler):
        ThreadingUDPServer.__init__(self, (ip, port), handler)
        Thread.__init__(self, target=self.serve_forever)

        # Set the thread name to the class name
        Thread.setName(self, f'UDP-{self.__class__.__name__} Server')
        self.daemon = True
Example #27
class server(object):
    def __init__(self, logger, endpoint=('localhost', 6000), authkey=None):
        self.logger = logger

        self.listener = Listener(endpoint, authkey=authkey)
        self.port = endpoint[1]

        self.sender_queue = Queue()
        self.receiver_queue = Queue()

    def start(self):
        self.logger.debug('waiting for connection')
        self.conn = self.listener.accept()
        self.logger.debug('Connection accepted from ' +
                          str(self.listener.last_accepted))

        self.sender = Thread(target=_sender,
                             args=(self.conn, self.sender_queue))
        self.sender.setName("sender server " + str(self.port))
        self.sender.start()

        self.receiver = Thread(target=_receiver,
                               args=(self.conn, self.receiver_queue))
        self.receiver.setName("receiver server " + str(self.port))
        self.receiver.start()
Example #28
    def run(self):

        # create the target url list; the data we need is a ranking,
        # so we don't need all of it, the top 100 is enough
        # each page has 20 items, so we only need to parse pages 1 to 5
        target_urls = [self.start_url.format(i) for i in range(1, 6)]

        # traverse the target url list and add every url to the url queue
        for url in target_urls:
            qiDianNovelUrlQueue.put(url)

        # parse every url and add each page element to qiDianNovelElementQueue
        for i in range(3):
            t = Thread(target=self.parse_list_urls)
            t.setDaemon(True)
            t.setName("qiDianUrlParse{}".format(i))
            t.start()
            novellogger.info(
                "thread named {} began to parse novel urls".format(
                    t.getName()))

        # start data handler
        time.sleep(5)
        qiDian_data_handler()

        for q in [
                qiDianNovelElementQueue, qiDianNovelUrlQueue,
                qiDianNovelDataQueue
        ]:
            q.join()
Example #29
def tag_reviews_multi(num_of_reviews, num_of_threads, bulk_size, labels):
    log("Tagging reviews with threading")
    log("Config : threads=" + str(num_of_threads) + " reviews_to_tag=" +
        str(num_of_reviews) + " bulk_size=" + str(bulk_size))

    # loading models
    models = load_models(labels)

    # loading all reviews ids to memory
    load_reviews_jsons(num_of_reviews)

    threads = []
    for i in range(1, num_of_threads + 1):
        t = Thread(target=thread_tagger, args=(
            bulk_size,
            models,
        ))
        t.setName("t" + str(i))
        t.setDaemon(True)
        threads.append(t)

    # starting all threads
    for thr in threads:
        thr.start()
        log("Started thread: " + thr.getName())

    # waiting for all threads to finish
    for thr in threads:
        thr.join()
    log("All threads finished...")
    log("Tagged " + str(_count_success) + " reviews successfully")
    log("Failed to tag " + str(_count_failed) + " reviews")
Example #30
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    def __init__(self, address, port, requestHandler):
        self._requestHandler = requestHandler
        self._port = port
        self._running = True

        TCPServer.__init__(self, (address, port), HTTPRequestHandler)

    @property
    def requestHandler(self):
        return self._requestHandler

    def server_bind(self):
        self.allow_reuse_address = True
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind(self.server_address)

        logger.debug('Starting TCP server at %s' % str(self.server_address))


    def start(self):
        self._thread = Thread(target=self.serve_forever)
        self._thread.setName('ServerThread')
        self._thread.start()

    def stop(self):
        self.shutdown()
        self._thread.join()
Example #31
class NonBlockingStreamReader:
    def __init__(self, stream):
        self._stream = stream
        self._queue = Queue()
        self._thread = None
        self.closed = True

    def start(self, push_callback=None):
        def _worker():
            while True:
                line = self._stream.readline()
                if line:
                    if push_callback:
                        push_callback(line)
                    self._queue.put(line)
                else:
                    logger.debug("the stream may be closed")
                    break
            self.closed = True

        self._thread = Thread(target=_worker)
        self._thread.setDaemon(True)
        self._thread.setName("NonBlockingStreamReader of %s" %
                             repr(self._stream))
        self.closed = False
        self._thread.start()

    def readline(self, timeout=None):
        try:
            return self._queue.get(block=timeout is not None, timeout=timeout)
        except Empty:
            return None
Example #32
    def rerequest(self, s, callback):
        # ProxyService_
        #
        proxy_mode = self.config.get('proxy_mode', 0)
        if proxy_mode == PROXY_MODE_PRIVATE:
            if DEBUG:
                print "_rerequest exited."  # + str(proxy_mode)
            return
        else:
            if DEBUG:
                print "_rerequest did not exit"  # + str(proxy_mode)
        #
        # _ProxyService

        if not self.lock.isfinished():  # still waiting for prior cycle to complete??
            def retry(self=self, s=s, callback=callback):
                self.rerequest(s, callback)
            self.sched(retry, 5)  # retry in 5 seconds
            return
        self.lock.reset()
        rq = Thread(target=self._rerequest, args=[s, callback])
        rq.setName("TrackerRerequestA" + rq.getName())
        # Arno: make this a daemon thread so the client closes sooner.
        rq.setDaemon(True)
        rq.start()
Example #33
 def init(self, callback = None, timeout = 10):
     if callback is None:
         return self._init()
     t = Thread(target=self._init, args=[callback, timeout])
     t.setName('dd-downloader-init-' + t.getName())
     t.setDaemon(True)
     t.start()
Example #34
 def download(self):
     if DEBUG:
         print 'http-sdownload: download()'
     if self.is_frozen_by_helper():
         if DEBUG:
             print 'http-sdownload: blocked, rescheduling'
         self.resched(1)
         return
     self.cancelled = False
     if self.downloader.picker.am_I_complete():
         self.downloader.downloads.remove(self)
         return
     self.index = self.downloader.picker.next(haveall, self._want, self)
     if self.index is None and self.frozen_by_helper:
         self.resched(0.01)
         return
     if self.index is None and not self.endflag and not self.downloader.peerdownloader.has_downloaders():
         self.endflag = True
         self.index = self.downloader.picker.next(haveall, self._want, self)
     if self.index is None:
         self.endflag = True
         self.resched()
     else:
         self.url = self.seedurl + '&piece=' + str(self.index)
         self._get_requests()
         if self.request_size < self.downloader.storage._piecelen(self.index):
             self.url += '&ranges=' + self._request_ranges()
         rq = Thread(target=self._request)
         rq.setName('HoffmanHTTPDownloader' + rq.getName())
         rq.setDaemon(True)
         rq.start()
         self.active = True
Example #35
def start_thread(target, as_daemon, name = None):
    thread = Thread(target = target)
    if name:
        thread.setName(name)
    thread.setDaemon(as_daemon)
    thread.start()
    return thread
Example #36
 def initiate_threads(self):
     for i in range(self.config.get("threads").get("number")):
         worker = Thread(target=self.crawl, args=[i])
         worker.setName("Thread-" + str(i))
         worker.start()
         self.all_threads.append(worker)
     return True
Example #38
def get_reactor():
    """
    Start the Twisted reactor in a separate thread, if not already done.
    Returns the reactor.
    The thread will automatically be destroyed when all the tests are done.
    """
    global _twisted_thread, _reactor

    if _twisted_thread:
        return _reactor

    def reactor_run():
        _reactor.__init__()
        _reactor._startedBefore = False
        _reactor._started = False
        _reactor.run(installSignalHandlers=False)

    from twisted.internet import reactor as twisted_reactor
    _reactor = twisted_reactor

    _twisted_thread = Thread(target=reactor_run)
    _twisted_thread.setName('threaded_reactor')
    _twisted_thread.setDaemon(True)
    _twisted_thread.start()

    # Wait a bit for the reactor to start.
    time.sleep(0.01)
    return _reactor
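A hypothetical test-side use of this helper: callFromThread is the standard thread-safe way to schedule work onto Twisted's loop, so a short sleep is enough to see the call run on the shared reactor thread:

import time

reactor = get_reactor()
results = []
reactor.callFromThread(results.append, 'ran on the reactor thread')
time.sleep(0.1)  # give the reactor thread a moment to process the call
assert results == ['ran on the reactor thread']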
Example #40
class DebugServerConnection(Connection):
    """
    Connection type to be used with DebugServer, replaces BLEConnection
    """
    def __init__(self, port=9090):
        super(DebugServerConnection, self).__init__()
        self.notify_handler = None
        self.buf = ""
        self.sock = socket.socket()
        self.sock.connect(('localhost', port))
        self.incoming = []

        self.reader = Thread(target=self._recv)
        self.reader.setName("Debug connection reader")
        self.reader.setDaemon(True)
        self.reader.start()

    def __del__(self):
        self.sock.close()

    def write(self, handle, data):
        payload = {"type": "write", "handle": handle, "data": str2hex(data)}
        self._send(payload)

    def _send(self, payload):
        log.debug("Sending to debug server: %s", payload)
        self.sock.send(json.dumps(payload) + "\n")

    def _recv(self):
        while True:
            data = self.sock.recv(1024)
            log.debug("Recv from debug server: %s", data.strip())
            if not data:
                raise KeyboardInterrupt("Server has closed connection")

            self.buf += data

            while "\n" in self.buf:
                line = self.buf[:self.buf.index("\n")]
                self.buf = self.buf[self.buf.index("\n") + 1:]
                if line:
                    item = json.loads(line)
                    if item['type'] == 'notification' and self.notify_handler:
                        try:
                            self.notify_handler(item['handle'],
                                                unhexlify(item['data']))
                        except BaseException:
                            log.error("Failed to notify handler: %s",
                                      traceback.format_exc())
                    elif item['type'] == 'response':
                        self.incoming.append(item)
                    else:
                        log.warning("Dropped inbound: %s", item)

    def set_notify_handler(self, handler):
        self.notify_handler = handler

    def is_alive(self):
        return self.reader.isAlive()
Example #41
 def start(self):
     self.stoped = False
     self.id = str(ID())
     t = Thread(target=self._recv_loop)
     t.setName("Client:recv_loop")
     t.daemon = True
     t.start()
     self.threads.append(t)
Example #42
 def aggregate_senddata(self, query):
     url = self.aggregate_forward + '?' + query
     if self.aggregate_password is not None:
         url += '&password=' + self.aggregate_password
     rq = Thread(target=self._aggregate_senddata, args=[url])
     rq.setName('AggregateSendData' + rq.getName())
     rq.setDaemon(True)
     rq.start()
Example #43
 def place(self, canvas):
     self.model.create_target(canvas)
     t = Thread(target=lambda: self.model.start_control())
     t.start()
     t.setName('control_loop_' + str(Plotter.canvas_index))
     print(str(t.getName()) + ' has been created')
     Plotter.canvas_index += 1
     self.active_models.append(t)
Example #44
 def predownload(self, callback, timeout = 10):
     if self.lock.locked():
         return
     t = Thread(target=self._predownload, args=[callback, timeout])
     t.setName('dd-downloader-predownload-' + t.getName())
     t.setDaemon(True)
     t.start()
Example #45
class ThreadPool(object):
    def wait_completion(self):
        """Pending threads finish."""
        def remaining():
            # -1 because we added a None element to stop the queue
            remainingItems = self.pool.qsize() - 1
            remainingSeconds = ltime.td(seconds=remainingItems)
            return (remainingItems, remainingSeconds)

        self.pool.put((None, [], {}))

        if self.pool.qsize() > 1:
            pywikibot.output(u'Waiting for %i items to be put. Estimated time remaining: %s'
                             % remaining())

        while self.thread.isAlive():
            try:
                self.thread.join(1)
            except KeyboardInterrupt:
                answer = pywikibot.inputChoice(u"""\
There are %i items remaining in the queue. Estimated time remaining: %s
Really exit?""" % remaining(),
                                               ['yes', 'no'], ['y', 'N'], 'N')
                if answer == 'y':
                    return
                if answer == 'y':
                    return

    # Create a separate thread for asynchronous page saves (and other requests)

    def async_manager(self):
        """Daemon; take requests from the queue and execute them in background."""
        while True:
            (request, args, kwargs) = self.pool.get()
            if request is None:
                break
            request(*args, **kwargs)

    def add_task(self, request, *args, **kwargs):
        """Put a request on the queue, and start the daemon if necessary."""
        if not self.thread.isAlive():
            try:
                self.pool.mutex.acquire()
                try:
                    self.thread.start()
                except (AssertionError, RuntimeError):
                    pass
            finally:
                self.pool.mutex.release()
        self.pool.put((request, args, kwargs))

    def __init__(self, numthread, name=None):
        # queue to hold pending requests
        self.pool = Queue(numthread)
        # set up the background thread
        self.thread = Thread(target=self.async_manager)
        # identification for debugging purposes
        self.thread.setName(name or "Thread")
        self.thread.setDaemon(True)
Example #46
 def _start_thread(workload, name, interval=None):
     args = [name]
     if interval is not None:
         args.append(interval)
     thread = Thread(target=workload, args=args)
     thread.setName('Metric collector ({0})'.format(name))
     thread.daemon = True
     thread.start()
     return thread
Example #47
def test_start_stop(message):
    if message['data']:
        thread = Thread(target=background_thread, args=(request.sid,))
        thread.daemon = True
        thread.setName(request.sid)
        thread_map[request.sid] = True
        thread.start()
    else:
        thread_map[request.sid] = False
Example #48
    def publish(self, event):
        if Config.mb_username is None:
            auth = None
        else:
            auth = {"username": Config.mb_username, "password": Config.mb_password}

        payload = event.to_json()

        retry_iterator = IncrementalCeilingListIterator([2, 2, 5, 5, 10, 10, 20, 20, 30, 30, 40, 40, 50, 50, 60], False)

        # Retry to publish the event until the timeout exceeds
        while int(time.time()) - self.__start_time < (Config.mb_publisher_timeout * 1000):
            retry_interval = retry_iterator.get_next_retry_interval()

            for mb_url in Config.mb_urls.split(","):
                mb_ip, mb_port = mb_url.split(":")

                # start a thread to execute publish event
                publisher_thread = Thread(target=self.__publish_event, args=(event, mb_ip, mb_port, auth, payload))
                publisher_thread.setDaemon(True)
                publisher_thread.setName("MBEventPublisherThreadForEvent%s" % event.__class__.__name__)
                self.__log.debug("Starting a publisher thread for event %s " % event.__class__.__name__)
                publisher_thread.start()

                # give sometime for the thread to complete
                time.sleep(5)

                # check if thread is still running and notify
                if publisher_thread.isAlive():
                    self.__log.debug(
                        "Event publishing timed out before succeeding. The message broker could be offline."
                    )

                # check if publish.single() succeeded
                try:
                    published = self.__msg_queue.get(block=False)
                except Empty:
                    published = False

                if published:
                    return True

            # All the brokers on the list were offline
            self.__log.debug(
                "Could not publish event to any of the provided message brokers. Retrying in %s seconds."
                % retry_interval
            )

            time.sleep(retry_interval)

        # Event publisher timeout exceeded
        self.__log.warn(
            "Could not publish event to any of the provided message brokers before "
            "the timeout [%s] exceeded. The event will be dropped." % Config.mb_publisher_timeout
        )
        return False
Example #49
def feed_torcache(db, hashs=None):
    if hashs is None:
        cur = db.cursor()
        cur.execute("SELECT hash FROM torrents WHERE created_at IS NOT NULL AND torcache IS NULL")
        hashs = [r[0].lower() for r in cur]
    if not hashs:
        return

    count = len(hashs)
    hashsq = queue.Queue()
    [hashsq.put(h) for h in hashs]
    counter = [0, 0, 0]
    pbar = progressbar.ProgressBar(widgets=widget("torrents uploaded to torcache"), maxval=count).start()
    def upload(hashsq, counter):
        db = MySQLdb.connect(**config.mysql)
        cur = db.cursor()
        try:
            while True:
                hash = hashsq.get(timeout=0)
                tc = is_torcache(hash)
                if not tc and os.path.isfile("%s/%s.torrent" % (config.torrents_dir, hash)):
                    try:
                        upload_to_torcache(db, hash, quiet=True)
                        counter[2]+=1
                    except requests.ConnectionError:
                        hashsq.put(hash)
                elif tc:
                    cur.execute("UPDATE torrents SET torcache=%s WHERE hash=%s", (True, hash))
                    counter[0]+=1
                else:
                    cur.execute("UPDATE torrents SET torcache=%s WHERE hash=%s", (False, hash))
                    counter[1]+=1
                pbar.update(pbar.currval + 1)
        except queue.Empty:
            pass
        finally:
            db.commit()
            cur.close()
            db.close()

    cur.close()
    stdout = sys.stdout
    try:
        threads = []
        sys.stdout = ThreadWriter(stdout)
        for i in range(0, 20):
            t = Thread(target=upload, args=(hashsq, counter))
            t.setName("upload-%02d" % i)
            t.daemon = True
            t.start()
            threads.append(t)
        join(threads)
    finally:
        sys.stdout = stdout
        pbar.finish()
    print("%s uploaded, %s already upped, %s failed" % (counter[0], counter[2], counter[1]))
Example #50
    def __call__(self, *args, **kwargs):
        _t = Thread(target=self.wrapper, args=args, kwargs=kwargs)
        _t.setName(self.__func.__name__)
        _t.start()

        self.__c.acquire()
        while not self.__done:
            self.__c.wait()
        self.__c.release()
        del _t
        return deepcopy(self.__result)
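For context, a minimal sketch of the kind of wrapper class this __call__ could belong to; wrapper, __func, __c, __done and __result are taken from the snippet, everything else is an assumption. The snippet's __call__ then blocks on the condition until wrapper signals completion and returns a deep copy of the result:

import threading

class ThreadedCall(object):  # hypothetical host class for the __call__ above
    def __init__(self, func):
        self.__func = func
        self.__c = threading.Condition()
        self.__done = False
        self.__result = None

    def wrapper(self, *args, **kwargs):
        # run the wrapped function, then publish the result under the condition
        result = self.__func(*args, **kwargs)
        self.__c.acquire()
        self.__result = result
        self.__done = True
        self.__c.notify()
        self.__c.release()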
Example #51
    def _start(self):
        for th_n in range(self.NUM_OF_WORKERS):
            worker = Thread(target=self._do_search)
            worker.setName('_do_search_{}'.format(str(th_n)))
            self._threads.append(worker)
            worker.start()

        self.page_queue.put(self._page())
        if not self.SLOW_START:
            for _ in range(self.NUM_OF_WORKERS - 1):
                self.page_queue.put(self._page.inc())
Example #52
 def rerequest(self, s, callback):
     if not self.lock.isfinished():  # still waiting for prior cycle to complete??
         def retry(self = self, s = s, callback = callback):
             self.rerequest(s, callback)
         self.sched(retry, 5)         # retry in 5 seconds
         return
     self.lock.reset()
     rq = Thread(target = self._rerequest, args = [s, callback])
     rq.setName( "TrackerRerequestA"+rq.getName() )
     # Arno: make this a daemon thread so the client closes sooner.
     rq.setDaemon(True)
     rq.start()
Example #53
 def run(self):
     """
     Start the scheduled task with a sleep time of delay in between
     :return:
     """
     while not self.terminated:
         time.sleep(self.delay)
         task_thread = Thread(target=self.task.execute_task)
         task_thread.setName("WorkerThreadForTask%s" % self.task.__class__.__name__)
         task_thread.setDaemon(True)
         log.debug("Starting a worker thread for the Scheduled Executor for task %s" % self.task.__class__.__name__)
         task_thread.start()
Example #54
 def start(self, pos = 0):
     if self.storage is None:
         raise Exception('Storage is not set')
     if self.final_url is None:
         raise Exception('Final url is not set')
     if self.lock.locked():
         self.seek = pos
         return
     t = Thread(target=self._request, args=[pos])
     t.setName('dd-downloader-' + t.getName())
     t.setDaemon(True)
     t.start()
Example #55
class BasicModule(IModule):

    # lock
    InstanceLock = Lock()

    """ -------------------------------------------------------------------- """
    """ __init__                                                             """
    """ -------------------------------------------------------------------- """
    def __init__(self, name, *p, **k):

        # set name
        self.name = name

        # logger
        self.logger = Activator().getInstance('LoggerFactory').getLogger(self.name)

        # request-handler
        self.requestHandler = None

        # onStop-delegates
        self.onStopDelegates = []

        # running-flag
        self.running = False

        # listener-thread
        self.thread = Thread(target = self.run)
        self.thread.setName(self.name)
        self.thread.setDaemon(True)

    """ -------------------------------------------------------------------- """
    """ run                                                                  """
    """ -------------------------------------------------------------------- """
    def run(self):

        # log
        self.logger.info("up and running")

        # running-flag
        self.running = True

        # module-main
        try:
            self.main()
        except Exception, e:
            if self.running:
                self.logger.error("Exception in Module-Thread (%s)" % (e))

        # shutdown
        self.shutdown()
Example #56
 def __init__(self, w_lista_gruppo=["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12"]):
     threads = []
     for w_gruppo in w_lista_gruppo:
         factsGenT = Thread(target=runSingleExtractor, args=(self, w_gruppo))
         factsGenT.setName('factsgen_' + w_gruppo)
         factsGenT.start()
         threads.append(factsGenT)
     threadAlive = True
     while threadAlive:
         threadAlive = False
         for t in threads:
             if t.isAlive():
                 threadAlive = True
     print "Execution finished"
Example #57
    def _initialize(self):
        # pycurl Connection pool
        self._create_pool(self.options.get("concurrent"))

        # internal pool
        self.default_poolid = self._new_pool()

        # create threads
        self.ths = []

        for fn in ("_read_multi_stack", "_read_retry_queue"):
            th = Thread(target=getattr(self, fn))
            th.setName(fn)
            self.ths.append(th)
            th.start()
Example #58
def get_hash(db=None, insert_new=False, dir_only=False, name_null=False):
    if db is None:
        db = MySQLdb.connect(**config.mysql)          
    cur = db.cursor()
    files = os.listdir(config.torrents_dir)
    hashs = [h[:-8] for h in files if h.endswith(".torrent")]
    if insert_new:
        khashs = set()
        i=0
        while hashs[i:i+50]:
            query = "SELECT hash FROM torrents WHERE (%s)"  % " OR ".join("hash=%s" for hash in hashs[i:i+50])
            cur.execute(query, tuple(hashs[i:i+50]))
            ret = [r[0] for r in cur]
            khashs = khashs.union(ret)
            i+=50
        new_hash = set(hashs).difference(khashs)
        count = len(new_hash)
        if count > 0:
            new_hashq = queue.Queue()
            [new_hashq.put(h) for h in new_hash]
            pbar = progressbar.ProgressBar(widgets=widget("inserting torrents in db"), maxval=count).start()

            try:
                threads = []
                for i in range(0, 20):
                    t = Thread(target=insert, args=(new_hashq, pbar))
                    t.setName("insert-%02d" % i)
                    t.daemon = True
                    t.start()
                    threads.append(t)
                join(threads)
            finally:
                pbar.finish()
    if dir_only:
        if name_null and hashs:
            query = "SELECT hash FROM torrents WHERE created_at IS NULL AND (%s)"  % " OR ".join("hash=%s" for hash in hashs)
            cur.execute(query, tuple(hashs))
            ret = [r[0] for r in cur]
            cur.close()
            return ret
        return hashs
    else:
        query = "SELECT hash FROM torrents WHERE created_at IS NULL AND (dht_last_get >= DATE_SUB(NOW(), INTERVAL 1 HOUR) OR dht_last_announce >= DATE_SUB(NOW(), INTERVAL 1 HOUR) OR %s)"  % " OR ".join("hash=%s" for hash in hashs)
    cur.execute(query, tuple(hashs))
    ret = [r[0] for r in cur]
    cur.close()
    return ret
Example #59
 def start(self):
     self._start = time.time()
     self._stopped = False
     MetricsCollector._start_thread(self._load_environment_configurations, 'load_configuration', 900)
     MetricsCollector._start_thread(self._run_system, 'system')
     MetricsCollector._start_thread(self._run_outputs, 'output')
     MetricsCollector._start_thread(self._run_sensors, 'sensor')
     MetricsCollector._start_thread(self._run_thermostats, 'thermostat')
     MetricsCollector._start_thread(self._run_errors, 'error')
     MetricsCollector._start_thread(self._run_pulsecounters, 'counter')
     MetricsCollector._start_thread(self._run_power_openmotics, 'energy')
     MetricsCollector._start_thread(self._run_power_openmotics_analytics, 'energy_analytics')
     thread = Thread(target=self._sleep_manager)
     thread.setName('Metric collector - Sleep manager')
     thread.daemon = True
     thread.start()