def create_client(amqp_host=broker_config.broker_hostname,
                  amqp_user=broker_config.broker_username,
                  amqp_pass=broker_config.broker_password,
                  ssl_enabled=broker_config.broker_ssl_enabled,
                  ssl_cert_path=broker_config.broker_cert_path):
    try:
        logger.debug('creating a new AMQP client for thread {0}, using'
                     ' hostname: {1}, username: {2}, ssl_enabled: {3},'
                     ' cert_path: {4}'.
                     format(current_thread(),
                            broker_config.broker_hostname,
                            broker_config.broker_username,
                            broker_config.broker_ssl_enabled,
                            broker_config.broker_cert_path))

        client = AMQPClient(amqp_host=amqp_host, amqp_user=amqp_user,
                            amqp_pass=amqp_pass, ssl_enabled=ssl_enabled,
                            ssl_cert_path=ssl_cert_path)

        logger.debug('AMQP client created for thread {0}'.
                     format(current_thread()))
    except Exception as e:
        err_msg = 'Failed to create AMQP client for thread: {0}, error: {1}'.\
            format(current_thread(), e)
        logger.warning(err_msg)
        raise

    return client
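A note on usage: since this factory is typically invoked once per worker thread, a common companion is to cache the resulting client in thread-local storage. Below is a minimal, self-contained sketch of that idea; the helper name get_client and the factory argument are hypothetical and not part of the example above.

import threading

_local = threading.local()  # each thread sees its own attributes on this object

def get_client(factory):
    # Hypothetical helper: create the client lazily, once per calling thread.
    if not hasattr(_local, 'client'):
        _local.client = factory()
    return _local.client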
Example #2
    def pull(git_repo):
        # check if modified files are present
        modified = AgentGitHandler.has_modified_files(git_repo.local_repo_path)
        if modified:
            if Config.is_commits_enabled:
                AgentGitHandler.log.debug(
                    "Un-staged files exist in working directory. Aborting git pull for this iteration...")
                return
            else:
                AgentGitHandler.log.warn("Changes detected in working directory but COMMIT_ENABLED is set to false!")
                AgentGitHandler.log.warn("Attempting to reset the working directory")
                AgentGitHandler.execute_git_command(["reset"], repo_path=git_repo.local_repo_path)

        # HEAD before pull
        (init_head, init_errors) = AgentGitHandler.execute_git_command(["rev-parse", "HEAD"], git_repo.local_repo_path)

        repo = Repo(git_repo.local_repo_path)
        AgentGitHandler.execute_git_command(["pull", "--rebase", "origin", git_repo.branch], git_repo.local_repo_path)
        AgentGitHandler.log.debug("Git pull rebase executed in checkout job")
        if repo.is_dirty():
            AgentGitHandler.log.error("Git pull operation in checkout job left the repository in dirty state")
            AgentGitHandler.log.error(
                "Git pull operation on remote %s for tenant %s failed" % (git_repo.repo_url, git_repo.tenant_id))

        # HEAD after pull
        (end_head, end_errors) = AgentGitHandler.execute_git_command(["rev-parse", "HEAD"], git_repo.local_repo_path)

        # check if HEAD was updated
        if init_head != end_head:
            AgentGitHandler.log.debug("Artifacts were updated as a result of the pull operation, thread: %s - %s" %
                                      (current_thread().getName(), current_thread().ident))

            return True
        else:
            return False
 def wrap(*args, **kwargs):
     self = args[0]
     if hasattr(self, 'db'):
         assert self.db.context.thread.workerThread == threading.current_thread()
     if hasattr(self, 'context'):
         assert self.context.thread.workerThread == threading.current_thread()
     return fn(*args, **kwargs)
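The fragment above is the inner function of a decorator; fn is the wrapped callable captured from the enclosing scope. A hedged sketch of the full decorator it implies (the name assert_worker_thread is assumed):

import functools
import threading

def assert_worker_thread(fn):
    # Hypothetical outer decorator around the `wrap` function shown above.
    @functools.wraps(fn)
    def wrap(*args, **kwargs):
        self = args[0]
        if hasattr(self, 'db'):
            assert self.db.context.thread.workerThread == threading.current_thread()
        if hasattr(self, 'context'):
            assert self.context.thread.workerThread == threading.current_thread()
        return fn(*args, **kwargs)
    return wrap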
Example #4
    def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool, worker_handler, task_handler, result_handler, cache):
        debug('finalizing pool')
        worker_handler._state = TERMINATE
        task_handler._state = TERMINATE
        debug('helping task handler/workers to finish')
        cls._help_stuff_finish(inqueue, task_handler, len(pool))
        if not (result_handler.is_alive() or len(cache) == 0):
            raise AssertionError('result handler exited while the cache is non-empty')
        result_handler._state = TERMINATE
        outqueue.put(None)  # sentinel for the result handler
        debug('joining worker handler')
        if threading.current_thread() is not worker_handler:
            worker_handler.join(1e+100)
        if pool and hasattr(pool[0], 'terminate'):
            debug('terminating workers')
            for p in pool:
                if p.exitcode is None:
                    p.terminate()

        debug('joining task handler')
        if threading.current_thread() is not task_handler:
            task_handler.join(1e+100)
        debug('joining result handler')
        if threading.current_thread() is not result_handler:
            result_handler.join(1e+100)
        if pool and hasattr(pool[0], 'terminate'):
            debug('joining pool workers')
            for p in pool:
                if p.is_alive():
                    # worker has not yet exited
                    debug('cleaning up worker %d' % p.pid)
                    p.join()

        return
Example #5
 def free(self):
     i = threading.current_thread().ident
     if i in self.buff_dict:
         buff = self.buff_dict.pop(threading.current_thread().ident)
         buff.seek(0)
         buff.truncate()
         self.buff_queue.append(buff)
Example #6
def _client_worker_process(factory, input_queue, output_queue, is_shutdown):
    """ This worker process takes input requests, issues them on its
    client handle, and then sends the client response (success or failure)
    to the manager to deliver back to the application.

    It should be noted that there are N of these workers and they can
    be run in-process or out-of-process, as all of the state serializes.

    :param factory: A client factory used to create a new client
    :param input_queue: The queue to pull new requests to issue
    :param output_queue: The queue to place client responses
    :param is_shutdown: Condition variable marking process shutdown
    """
    log.info("starting up worker : %s", threading.current_thread())
    client = factory()
    while not is_shutdown.is_set():
        try:
            workitem = input_queue.get(timeout=1)
            log.debug("dequeue worker request: %s", workitem)
            if not workitem: continue
            try:
                log.debug("executing request on thread: %s", workitem)
                result = client.execute(workitem.request)
                output_queue.put(WorkResponse(False, workitem.work_id, result))
            except Exception as exception:
                log.exception("error in worker "
                              "thread: %s", threading.current_thread())
                output_queue.put(WorkResponse(True,
                                              workitem.work_id, exception))
        except Exception:
            # most likely a queue.Empty timeout; loop back and re-check the shutdown flag
            pass
    log.info("request worker shutting down: %s", threading.current_thread())
def eventCreator():
    aLotOfData = []
    es_conn = tools.get_es_connection()
    while True:
        d = q.get()
        m = json.loads(d)
        data = {
            '_type': 'netflow_lhcopn'
        }
        if 'data' not in m:
            print(threading.current_thread().name, 'no data in this message!')
            q.task_done()
            continue

        source = m['data']['src_site']
        destination = m['data']['dst_site']
        data['MA'] = 'capc.cern'
        data['srcInterface'] = source
        data['dstInterface'] = destination
        ts = m['data']['timestamp']
        th = m['data']['throughput']
        dati = datetime.utcfromtimestamp(float(ts))
        data['_index'] = "network_weather-" + \
            str(dati.year) + "." + str(dati.month) + "." + str(dati.day)
        data['timestamp'] = int(float(ts) * 1000)
        data['utilization'] = int(th)
        # print(data)
        aLotOfData.append(copy.copy(data))

        q.task_done()
        if len(aLotOfData) > 10:
            succ = tools.bulk_index(aLotOfData, es_conn=es_conn, thread_name=threading.current_thread().name)
            if succ is True:
                aLotOfData = []
Example #8
def application_unproxied(environ, start_response):
    """ WSGI entry point."""
    # cleanup db/uid trackers - they're set at HTTP dispatch in
    # web.session.OpenERPSession.send() and at RPC dispatch in
    # openerp.service.web_services.objects_proxy.dispatch().
    # /!\ The cleanup cannot be done at the end of this `application`
    # method because werkzeug still produces relevant logging afterwards 
    if hasattr(threading.current_thread(), 'uid'):
        del threading.current_thread().uid
    if hasattr(threading.current_thread(), 'dbname'):
        del threading.current_thread().dbname

    openerp.service.start_internal()

    # Try all handlers until one returns some result (i.e. not None).
    wsgi_handlers = [wsgi_xmlrpc_1, wsgi_xmlrpc, wsgi_xmlrpc_legacy, wsgi_webdav]
    wsgi_handlers += module_handlers
    for handler in wsgi_handlers:
        result = handler(environ, start_response)
        if result is None:
            continue
        return result

    # We never returned from the loop.
    response = 'No handler found.\n'
    start_response('404 Not Found', [('Content-Type', 'text/plain'), ('Content-Length', str(len(response)))])
    return [response]
Example #9
def _manager_worker_process(output_queue, futures, is_shutdown):
    """ This worker process manages taking output responses and
    tying them back to the future keyed on the initial transaction id.
    Basically this can be thought of as the delivery worker.

    It should be noted that there is only one of these threads and it must
    be an in-process thread, as the futures will not serialize across
    processes.

    :param output_queue: The queue holding output results to return
    :param futures: The mapping of tid -> future
    :param is_shutdown: Condition variable marking process shutdown
    """
    log.info("starting up manager worker: %s", threading.current_thread())
    while not is_shutdown.is_set():
        try:
            workitem = output_queue.get()
            future = futures.get(workitem.work_id, None)
            log.debug("dequeue manager response: %s", workitem)
            if not future: continue
            if workitem.is_exception:
                future.set_exception(workitem.response)
            else: future.set_result(workitem.response)
            log.debug("updated future result: %s", future)
            del futures[workitem.work_id]
        except Exception as ex:
            log.exception("error in manager")
    log.info("manager worker shutting down: %s", threading.current_thread())
Example #10
 def acquire(self):
     print('#' * 120, file=sys.stderr)
     print('acquire called: thread id:', current_thread(), 'shared:', self._is_shared, file=sys.stderr)
     traceback.print_stack()
     RWLockWrapper.acquire(self)
     print('acquire done: thread id:', current_thread(), file=sys.stderr)
     print('_' * 120, file=sys.stderr)
Example #11
 def release(self, *args):
     print('*' * 120, file=sys.stderr)
     print('release called: thread id:', current_thread(), 'shared:', self._is_shared, file=sys.stderr)
     traceback.print_stack()
     RWLockWrapper.release(self)
     print('release done: thread id:', current_thread(), 'is_shared:', self._shlock.is_shared, 'is_exclusive:', self._shlock.is_exclusive, file=sys.stderr)
     print('_' * 120, file=sys.stderr)
Example #12
    def run(self):
        while True:
            try:
                url, deep = self.pool.urls_queue.get(block = True, timeout = 5)
                #url, deep = self.pool.urls_queue.get(block = False)
            except Queue.Empty:
                #time.sleep(5)  # used when the queue get() above is non-blocking
                logging.debug("%s, qsize: %s"%(threading.current_thread().getName(),
                                  self.pool.urls_queue.qsize()))
                if not self.pool.busy():
                    break
            else:
                self.running = True
                #print "%s, url=%s, deep=%s, qsize=%s"%(
                #      threading.current_thread().getName(), url, deep, self.pool.urls_queue.qsize())
                _spider = Spider(url, deep, self.key)
                links = _spider.spider()
                if links:
                    #print "++++++++++ In WorkThread: %s, links_lenth: %s ++++++++++"%(
                    #        threading.current_thread().getName(), len(links))
                    for i in links:
                        hash_code = md5.new(i.encode('utf-8')).hexdigest()

                        self.pool.saved_urls_lock.acquire()
                        if not hash_code in self.pool.saved_urls:
                            self.pool.saved_urls[hash_code] = i
                            self.pool.urls_queue.put((i, deep - 1))
                        # Never forget to release the lock!
                        self.pool.saved_urls_lock.release()
                self.running = False
                self.pool.urls_queue.task_done()
        logging.info("%s, ended*******\n"%(threading.current_thread().getName()))
Example #13
 def _thread_worker(self):
     """
     A worker that does actual jobs
     """
     self.log.debug("Starting shooter thread %s", th.current_thread().name)
     while not self.quit.is_set():
         try:
             task = self.task_queue.get(timeout=1)
             if not task:
                 self.log.info(
                     "%s got killer task.", th.current_thread().name)
                 break
             timestamp, missile, marker = task
             planned_time = self.start_time + (timestamp / 1000.0)
             delay = planned_time - time.time()
             if delay > 0:
                 time.sleep(delay)
             self.gun.shoot(missile, marker, self.results)
         except (KeyboardInterrupt, SystemExit):
             break
         except Empty:
             if self.quit.is_set():
                 self.log.debug(
                     "Empty queue. Exiting thread %s",
                     th.current_thread().name)
                 return
         except Full:
             self.log.warning(
                 "Couldn't put to result queue because it's full")
     self.log.debug("Exiting shooter thread %s", th.current_thread().name)
Example #14
def run_using_threadpool(fn_to_execute, inputs, pool_size):
  """For internal use only; no backwards-compatibility guarantees.

  Runs the given function on given inputs using a thread pool.

  Args:
    fn_to_execute: Function to execute
    inputs: Inputs on which given function will be executed in parallel.
    pool_size: Size of thread pool.
  Returns:
    Results retrieved after executing the given function on given inputs.
  """

  # ThreadPool crashes in old versions of Python (< 2.7.5) if created
  # from a child thread. (http://bugs.python.org/issue10015)
  if not hasattr(threading.current_thread(), '_children'):
    threading.current_thread()._children = weakref.WeakKeyDictionary()
  pool = ThreadPool(min(pool_size, len(inputs)))
  try:
    # We record and reset logging level here since 'apitools' library Beam
    # depends on updates the logging level when used with a threadpool -
    # https://github.com/google/apitools/issues/141
    # TODO: Remove this once above issue in 'apitools' is fixed.
    old_level = logging.getLogger().level
    return pool.map(fn_to_execute, inputs)
  finally:
    pool.terminate()
    logging.getLogger().setLevel(old_level)
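A small usage sketch, assuming the helper above and its imports are available; the inputs and function here are made up:

# Hypothetical usage: square a few numbers on at most four worker threads.
results = run_using_threadpool(lambda x: x * x, [1, 2, 3, 4, 5], pool_size=4)
print(results)  # [1, 4, 9, 16, 25]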
Example #15
def worker_func():
    print('worker thread started in %s' % (threading.current_thread()))
    # Re-seed the random number generator
    random.seed()
    # Put the thread to sleep for a random amount of time
    time.sleep(random.random())
    print('worker thread finished in %s' % (threading.current_thread()))
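A hedged driver for the function above, assuming worker_func and its imports are in scope: start two threads and wait for both to finish.

import threading

threads = [threading.Thread(target=worker_func) for _ in range(2)]
for t in threads:
    t.start()
for t in threads:
    t.join()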
Example #16
    def __repr__(self):
        "x.__repr__() <==> repr(x)"

        name = self.__class__.__name__

        if hasattr(self, "channel") and self.channel is not None:
            channel = "/%s" % self.channel
        else:
            channel = ""

        q = len(self._queue)
        c = len(self.channels)
        h = len(self._handlers)
        state = self.state

        if HAS_MULTIPROCESSING == 2:
            pid = current_process().ident
        elif HAS_MULTIPROCESSING == 1:
            pid = current_process().getPid()
        else:
            pid = os.getpid()

        if pid:
            id = "%s:%s" % (pid, current_thread().getName())
        else:
            id = current_thread().getName()

        format = "<%s%s %s (queued=%d, channels=%d, handlers=%d) [%s]>"
        return format % (name, channel, id, q, c, h, state)
Example #17
def loop():
    print 'thread %s is running...' % threading.current_thread().name
    n = 0
    while n < 5:
        n = n + 1
        print 'thread %s >> %s' % (threading.current_thread().name, n)
    print 'thread %s ended.' % threading.current_thread().name
Example #18
  def test_lifecycle(self):
    """Verify we store multiple objects as a list of snapshots."""
    first = TestData('first', 1, TestDetails())
    second = TestData('second', 2)

    journal = TestJournal(BytesIO())

    journal.store(first)
    journal.store(second)
    journal.terminate()

    decoder = json.JSONDecoder()
    got_stream = RecordInputStream(BytesIO(journal.final_content))
    got_str = [e for e in got_stream]
    got_json = '[{0}]'.format(','.join(got_str))
    got = decoder.decode(got_json)
    self.assertEquals(4, len(got))

    snapshot = JsonSnapshot()
    snapshot.add_object(first)
    json_object = snapshot.to_json_object()
    json_object['_timestamp'] = journal.clock.last_time - 1
    json_object['_thread'] = threading.current_thread().ident
    self.assertItemsEqual(json_object, got[1])

    snapshot = JsonSnapshot()
    snapshot.add_object(second)
    json_object = snapshot.to_json_object()
    json_object['_timestamp'] = journal.clock.last_time
    json_object['_thread'] = threading.current_thread().ident
    self.assertItemsEqual(json_object, got[2])
Example #19
	def do_job(self,args):

		if self.isThread==1:
			print threading.current_thread(),args
		else:
			print datetime.datetime.now()
			print multiprocess.current_process(),args
Example #20
 def fetch_results():
   threading.current_thread().fetch_results_error = None
   try:
     new_client = self.create_impala_client()
     new_client.fetch(query, handle)
   except ImpalaBeeswaxException as e:
     threading.current_thread().fetch_results_error = e
Example #21
	def validateItem(self, item):
		''' 
		Some news items may miss title, link, description and/or date attributes.
		If that's the case, then add those attributes to the item object.
		'''
		attr = ['title', 'description', 'link' ]

		for a in attr:
			if not hasattr(item, a):
				logging.warning(u'{0} Item has no attribute [{1}]'.format(current_thread().name, a))
				setattr(item, a, '')

		# Strip html tags and entity names as soon as possible
		item.title = self.stripHTMLEntityNames(item.title)
		item.description = self.stripTags(item.description)

		if hasattr(item, 'updated_parsed'):
			return
		elif hasattr(item, 'published_parsed'):
			setattr(item, 'updated_parsed', item.published_parsed)
			return
		elif hasattr(item, 'created_parsed'):
			setattr(item, 'updated_parsed', item.created_parsed)
			return
		else:
			logging.warning(u'{0} Can\'t determine item date: {1}. Will use default'.format(current_thread().name, item.title))
			setattr(item, 'updated_parsed', time.gmtime())
Example #22
	def submit(self, items, submitType):
		""" Submit the news items to the forum """

		# If there is nothing to submit, just return
		if not items: 
			return

		curDate  = datetime.date.today()
		newMonth = (curDate == datetime.date(curDate.year, curDate.month, 1))
		#subject  = u'Ежедневные новости науки - ' + months[curDate.month - 1] + u', ' + str(curDate.year).encode('utf-8')
		subject  = u'Ежедневные новости науки - {0}, {1}'.format(months[curDate.month - 1], str(curDate.year)) 

		if newMonth or subject.encode('utf-8') != self.getCurrentTopic('subject'):
			self.log.info("{0} It's a new month".format(current_thread().name))
			self.mech.open("http://www.ateism.ru/forum/posting.php?mode=post&f=35")
			self.mech.select_form(nr=1)
			self.mech.form['subject'] = subject.encode('utf-8') 
		else:
			self.mech.open(self.getCurrentTopic())
			self.mech.select_form(nr=1)

		self.mech.form['message'] = u''.join([unicode(i) for i in items]).encode('utf-8')

		self.log.info(u'{0} Submitting {1} items... Submit type: {2}'.format(current_thread().name, len(items), submitType))
		#self.mech.submit(name='post')
		if submitType == 'post' or submitType == 'preview':
			self.mech.submit(name=submitType)
		else:
			self.log.warning(u'{0} Unknown submit type: [{1}]'.format(current_thread().name, submitType))

		self.log.info(u'{0} Submission [OK]'.format(current_thread().name))
		
		if newMonth:
			self.updateTopicURL(subject)
    def _worker(self):
        '''
        This is the worker which will get the image from 'inbox',
        calculate the hash and puts the result in 'outbox'
        '''

        while not self.shutdown.isSet():
            
            try:
                image_path = self.inbox.get_nowait()
            except Empty:
                print 'no data found. isset: ' , self.done.isSet()
                if not self.done.isSet():
                    with self.empty:
                        self.empty.wait()
                        continue
                else:
                    break

            if not os.path.exists(image_path):
                self.error((image_path, 'Image Does not Exist'))
                continue
                
            try:
                print '[%s] Processing %s' % (current_thread().ident, image_path)
                image_hash = average_hash(image_path)
                self.outbox.put((image_hash, image_path))
            except IOError as err:
                print 'ERROR: Got %s for image: %s' % (err, image_path)
        print 'Worker %s has done processing.' % current_thread().ident
Example #24
def remove_heart_log(*args, **kwargs):
    if six.PY2:
        if threading.current_thread().name == 'MainThread':
            debug_log(*args, **kwargs)
    else:
        if threading.current_thread() == threading.main_thread():
            debug_log(*args, **kwargs)
Example #25
def send_request():
    import requests
    print threading.current_thread().name
    url = 'http://localhost:9999/hello/' + threading.current_thread().name
    response=requests.get(url)

    print response.content
Example #26
    def _threaded_resolve_AS():
        """Get an ASN from the queue, resolve it, return its routes to the
        *main* process and repeat until signaled to stop.
        This function is going to be spawned as a thread.
        """
        while True:
            current_AS = q.get()
            if current_AS == 'KILL':
                q.task_done()
                break

            try:
                resp = comm.get_routes_by_autnum(current_AS, ipv6_enabled=True)
                if resp is None:
                    raise LookupError
                routes = parsers.parse_AS_routes(resp)
            except LookupError:
                logging.warning("{}: {}: No Object found for {}"
                                .format(mp.current_process().name,
                                        threading.current_thread().name,
                                        current_AS))
                routes = None
            except Exception as e:
                logging.error("{}: {}: Failed to resolve DB object {}. {}"
                              .format(mp.current_process().name,
                                      threading.current_thread().name,
                                      current_AS, e))
                routes = None
            result_q.put((current_AS, routes))
            q.task_done()
Example #27
 def wrapper(*args, **kw):
     print("entering %s for thread %s:%s"
           %(fn.func_name, getpid(), current_thread()))
     ret = fn(*args, **kw)
     print("leaving %s for thread %s:%s"
           %(fn.func_name, getpid(), current_thread()))
     return ret
Example #28
    def init(self, params):
        self.params = dict(params)
        # OpenERP session setup
        self.session_id = self.params.pop("session_id", None) or uuid.uuid4().hex
        self.session = self.httpsession.get(self.session_id)
        if not self.session:
            self.session = session.OpenERPSession()
            self.httpsession[self.session_id] = self.session

        # set db/uid trackers - they're cleaned up at the WSGI
        # dispatching phase in openerp.service.wsgi_server.application
        if self.session._db:
            threading.current_thread().dbname = self.session._db
        if self.session._uid:
            threading.current_thread().uid = self.session._uid

        self.context = self.params.pop('context', {})
        self.debug = self.params.pop('debug', False) is not False
        # Determine self.lang
        lang = self.params.get('lang', None)
        if lang is None:
            lang = self.context.get('lang')
        if lang is None:
            lang = self.httprequest.cookies.get('lang')
        if lang is None:
            lang = self.httprequest.accept_languages.best
        if not lang:
            lang = 'en_US'
        # transform a 2-letter lang like 'en' into a 5-letter one like 'en_US'
        lang = babel.core.LOCALE_ALIASES.get(lang, lang)
        # we use _ as separator where RFC2616 uses '-'
        self.lang = lang.replace('-', '_')
Example #29
def show_thread(q, extraByteCodes):
    for i in range(5):
        for j in range(extraByteCodes):
            pass
        # q.put(threading.current_thread().name)
        print threading.current_thread().name
    return
Example #30
def run_script_remote(node):
    print current_thread().name+": Execution of "+script+".sh on node "+node+" started."
    client = paramiko.SSHClient()
    client.load_system_host_keys()
    client.set_missing_host_key_policy(paramiko.WarningPolicy())
    client.connect(node, username='******')
    
    channel = client.invoke_shell()
    channel.send('sudo chmod 744 '+script+'.sh\n')
    out = ''
    while not out.endswith('$ '):
        resp = channel.recv(1024)
        out += resp

    # Reading the output back seems to be the only way to 
    # make sure the update finishes
    channel.send('./'+script+'.sh\n')
    out = ''
    while not out.endswith('$ '):
        resp = channel.recv(1024)
        out += resp

    #add the newline to the node output
    out += '\n'

    # write the update's output to a log file, just for sanity
    f = open(node+'_'+script+'.log', 'wb')
    f.write(out)
    f.close()

    client.close()
    print current_thread().name+": Finished on node "+node+"\nCheck this node's script log file to make sure there were no errors."   
Example #31
 def _is_master_thread(self):
     return threading.current_thread().ident == self._master_thread
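The method above assumes self._master_thread was recorded earlier, presumably in the constructor. A self-contained sketch of that pairing, with a hypothetical class name:

import threading

class MasterThreadAware(object):
    # Hypothetical class showing where _master_thread would be captured.
    def __init__(self):
        self._master_thread = threading.current_thread().ident

    def _is_master_thread(self):
        return threading.current_thread().ident == self._master_thread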
Example #32
# A simple example of threading
import threading 
import os 
  
def task1(): 
    print("Task 1 assigned to thread: {}".format(threading.current_thread().name)) 
    print("ID of process running task 1: {}".format(os.getpid())) 
  
def task2(): 
    print("Task 2 assigned to thread: {}".format(threading.current_thread().name)) 
    print("ID of process running task 2: {}".format(os.getpid())) 
  
if __name__ == "__main__": 
  
    # print ID of current process 
    print("ID of process running main program: {}".format(os.getpid())) 
  
    # print name of main thread 
    print("Main thread name: {}".format(threading.current_thread().name)) 
  
    # creating threads 
    t1 = threading.Thread(target=task1, name='t1') 
    t2 = threading.Thread(target=task2, name='t2')   
  
    # starting threads 
    t1.start() 
    t2.start() 
  
    # wait until all threads finish 
    t1.join() 
    t2.join() 
Example #33
    def __call__(self, a):
        m = _get_backing_memmap(a)
        if m is not None and isinstance(m, np.memmap):
            # a is already backed by a memmap file, let's reuse it directly
            return _reduce_memmap_backed(a, m)

        if (not a.dtype.hasobject and self._max_nbytes is not None
                and a.nbytes > self._max_nbytes):
            # check that the folder exists (lazily create the pool temp folder
            # if required)
            try:
                os.makedirs(self._temp_folder)
                os.chmod(self._temp_folder, FOLDER_PERMISSIONS)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise e

            try:
                basename = self._memmaped_arrays.get(a)
            except KeyError:
                # Generate a new unique random filename. The process and thread
                # ids are only useful for debugging purpose and to make it
                # easier to cleanup orphaned files in case of hard process
                # kill (e.g. by "kill -9" or segfault).
                basename = "{}-{}-{}.pkl".format(
                    os.getpid(), id(threading.current_thread()),
                    uuid4().hex)
                self._memmaped_arrays.set(a, basename)
            filename = os.path.join(self._temp_folder, basename)

            # In case the same array with the same content is passed several
            # times to the pool subprocess children, serialize it only once

            is_new_memmap = filename not in self._temporary_memmaped_filenames

            # add the memmap to the list of temporary memmaps created by joblib
            self._temporary_memmaped_filenames.add(filename)

            if self._unlink_on_gc_collect:
                # Bump reference count of the memmap by 1 to account for
                # shared usage of the memmap by a child process. The
                # corresponding decref call will be executed upon calling
                # resource_tracker.maybe_unlink, registered as a finalizer in
                # the child.
                # the incref/decref calls here are only possible when the child
                # and the parent share the same resource_tracker. It is not the
                # case for the multiprocessing backend, but it does not matter
                # because unlinking a memmap from a child process is only
                # useful to control the memory usage of long-lasting child
                # processes, while the multiprocessing-based pools terminate
                # their workers at the end of a map() call.
                resource_tracker.register(filename, "file")

            if is_new_memmap:
                # Incref each temporary memmap created by joblib one extra
                # time.  This means that these memmaps will only be deleted
                # once an extra maybe_unlink() is called, which is done once
                # all the jobs have completed (or been canceled) in the
                # Parallel._terminate_backend() method.
                resource_tracker.register(filename, "file")

            if not os.path.exists(filename):
                util.debug(
                    "[ARRAY DUMP] Pickling new array (shape={}, dtype={}) "
                    "creating a new memmap at {}".format(
                        a.shape, a.dtype, filename))
                for dumped_filename in dump(a, filename):
                    os.chmod(dumped_filename, FILE_PERMISSIONS)

                if self._prewarm:
                    # Warm up the data by accessing it. This operation ensures
                    # that the disk access required to create the memmapping
                    # file are performed in the reducing process and avoids
                    # concurrent memmap creation in multiple children
                    # processes.
                    load(filename, mmap_mode=self._mmap_mode).max()

            else:
                util.debug(
                    "[ARRAY DUMP] Pickling known array (shape={}, dtype={}) "
                    "reusing memmap file: {}".format(
                        a.shape, a.dtype, os.path.basename(filename)))

            # The worker process will use joblib.load to memmap the data
            return ((load_temporary_memmap, (filename, self._mmap_mode,
                                             self._unlink_on_gc_collect)))
        else:
            # do not convert a into memmap, let pickler do its usual copy with
            # the default system pickler
            util.debug(
                '[ARRAY DUMP] Pickling array (NO MEMMAPPING) (shape={}, '
                ' dtype={}).'.format(a.shape, a.dtype))
            return (loads, (dumps(a, protocol=HIGHEST_PROTOCOL), ))
Example #34
File: me.py Project: robgil/pulsar
 def _callback():
     d.set_result(current_thread().ident)
Example #35
def hello():
    print('Hello world! (%s)' % threading.current_thread())
    yield from asyncio.sleep(1)
    print('Hello again! (%s)' % threading.current_thread())
def task(n, m):
    print('{}: sleeping {}'.format(threading.current_thread().name, n))
    print("test m: ", m)
    time.sleep(n / 10)
    print('{}: done with {}'.format(threading.current_thread().name, n))
    return n / 10
Example #37
 def id_func(job, q):
     q.put(threading.current_thread().ident)
     sleep(0.05)
Example #38
def worker():
    print(threading.enumerate())
    print('{} say hello'.format(threading.current_thread().getName()))
    print('active_count: ', threading.active_count())
Example #39
 def format(self, record):
     record.pid = os.getpid()
     record.dbname = getattr(threading.current_thread(), 'dbname', '?')
     return logging.Formatter.format(self, record)
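The formatter above reads dbname off the current thread. A hedged wiring example; the enclosing class name DBNameFormatter is an assumption, since the snippet only shows the method:

import logging
import os
import threading

class DBNameFormatter(logging.Formatter):
    # Assumed enclosing class for the format() method shown above.
    def format(self, record):
        record.pid = os.getpid()
        record.dbname = getattr(threading.current_thread(), 'dbname', '?')
        return logging.Formatter.format(self, record)

handler = logging.StreamHandler()
handler.setFormatter(DBNameFormatter('%(asctime)s pid=%(pid)s db=%(dbname)s %(levelname)s %(message)s'))
log = logging.getLogger('example')
log.addHandler(handler)

threading.current_thread().dbname = 'prod_db'  # what the formatter reads back
log.warning('slow query detected')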
Example #40
 def run(self):
     the_thread = threading.current_thread()
     for x in range(3):
         print("%s正在画图..." % the_thread.name)
         time.sleep(1)
Example #41
 def _unblocked(self, doc):
     thread = threading.current_thread()
     thread_id = thread.ident if thread else None
     return doc is self.curdoc and self._thread_id == thread_id
Example #42
import threading

# Return the current Thread object
print(threading.current_thread())
# Return the number of Thread objects currently alive; same result as len(threading.enumerate()).
print(threading.active_count())
# Return a list of all Thread objects currently alive, i.e. started and not yet finished; threads not yet started or already terminated are excluded.
print(threading.enumerate())


def worker():
    print(threading.enumerate())
    print('{} say hello'.format(threading.current_thread().getName()))
    print('active_count: ', threading.active_count())


th1 = threading.Thread(target=worker, name='me_thread')
th2 = threading.Thread(target=worker, name='me02_thread')
# Start the threads
th1.start()
th1.join()
th2.start()
th2.join()
Example #43
def run_server(port, pq, cond):
    httpd = QueuingHTTPServer(('0.0.0.0', port), apiHandler, pq, cond)
    while True:
        print("HANDLE: " + threading.current_thread().name)
        httpd.handle_request()
Example #44
 def get_dispatcher_name(q):
     q.put(current_thread().name)
Example #45
 def run(self):
     self.logger.debug("ssh port forwarding running in tid: 0x{0:x}".format(
         threading.current_thread().ident))
     self._forward_server.serve_forever()
Example #46
    def serve_client(self, conn):
        '''
        Handle requests from the proxies in a particular process/thread
        '''
        util.debug('starting server thread to service %r',
                   threading.current_thread().name)

        recv = conn.recv
        send = conn.send
        id_to_obj = self.id_to_obj

        while not self.stop_event.is_set():

            try:
                methodname = obj = None
                request = recv()
                ident, methodname, args, kwds = request
                obj, exposed, gettypeid = id_to_obj[ident]

                if methodname not in exposed:
                    raise AttributeError(
                        'method %r of %r object is not in exposed=%r' %
                        (methodname, type(obj), exposed))

                function = getattr(obj, methodname)

                try:
                    res = function(*args, **kwds)
                except Exception as exc:
                    msg = ('#ERROR', exc)
                else:
                    typeid = gettypeid and gettypeid.get(methodname, None)
                    if typeid:
                        rident, rexposed = self.create(conn, typeid, res)
                        token = Token(typeid, self.address, rident)
                        msg = ('#PROXY', (rexposed, token))
                    else:
                        msg = ('#RETURN', res)

            except AttributeError:
                if methodname is None:
                    msg = ('#TRACEBACK', format_exc())
                else:
                    try:
                        fallback_func = self.fallback_mapping[methodname]
                        result = fallback_func(self, conn, ident, obj, *args,
                                               **kwds)
                        msg = ('#RETURN', result)
                    except Exception:
                        msg = ('#TRACEBACK', format_exc())

            except EOFError:
                util.debug('got EOF -- exiting thread serving %r',
                           threading.current_thread().name)
                sys.exit(0)

            except Exception:
                msg = ('#TRACEBACK', format_exc())

            try:
                try:
                    send(msg)
                except Exception:
                    send(('#UNSERIALIZABLE', repr(msg)))
            except Exception as exc:
                util.info('exception in thread serving %r',
                          threading.current_thread().name)
                util.info(' ... message was %r', msg)
                util.info(' ... exception was %r', exc)
                conn.close()
                sys.exit(1)
Example #47
def collection_replica_update(once=False):
    """
    Main loop to check and update the collection replicas.
    """

    logging.info('collection_replica_update: starting')

    logging.info('collection_replica_update: started')

    # Make an initial heartbeat so that all abacus-collection-replica daemons have the correct worker number on the next try
    executable = 'abacus-collection-replica'
    hostname = socket.gethostname()
    pid = os.getpid()
    current_thread = threading.current_thread()
    live(executable=executable,
         hostname=hostname,
         pid=pid,
         thread=current_thread)

    while not graceful_stop.is_set():
        try:
            # Heartbeat
            heartbeat = live(executable=executable,
                             hostname=hostname,
                             pid=pid,
                             thread=current_thread)

            # Select a bunch of collection replicas for this worker to update
            start = time.time()  # NOQA
            replicas = get_cleaned_updated_collection_replicas(
                total_workers=heartbeat['nr_threads'] - 1,
                worker_number=heartbeat['assign_thread'])

            logging.debug('Index query time %f size=%d' %
                          (time.time() - start, len(replicas)))
            # If the list is empty, send the worker to sleep
            if not replicas and not once:
                logging.info(
                    'collection_replica_update[%s/%s] did not get any work' %
                    (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1))
                time.sleep(10)
            else:
                for replica in replicas:
                    if graceful_stop.is_set():
                        break
                    start_time = time.time()
                    update_collection_replica(replica)
                    logging.debug(
                        'collection_replica_update[%s/%s]: update of collection replica "%s" took %f'
                        %
                        (heartbeat['assign_thread'], heartbeat['nr_threads'] -
                         1, replica['id'], time.time() - start_time))
        except Exception:
            logging.error(traceback.format_exc())
        if once:
            break

    logging.info('collection_replica_update: graceful stop requested')
    die(executable=executable,
        hostname=hostname,
        pid=pid,
        thread=current_thread)
    logging.info('collection_replica_update: graceful stop done')
Example #48
def log(msg):
    current_time = time.strftime("%H:%M:%S", time.localtime())
    print('%s:   %s :%s ' %
          (current_time, threading.current_thread().getName(), msg))
Example #49
def work():
    while True:
        url = queue.get()
        Spider.crawl_page(threading.current_thread().name, url)
        queue.task_done()
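A generic, self-contained version of the queue-draining pattern above; Spider and the crawl queue are not reproduced, and the names here are made up:

import queue
import threading

q = queue.Queue()

def work():
    while True:
        item = q.get()
        print(threading.current_thread().name, 'processing', item)
        q.task_done()

for _ in range(4):
    threading.Thread(target=work, daemon=True).start()

for item in range(10):
    q.put(item)
q.join()  # blocks until every queued item has been marked done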
Example #50
    def harakiri(self, signal_number, stack_frame):  # type: (int, FrameType) -> None
        """
        Handles the reception of a timeout signal indicating that a request has been processing for too long, as
        defined by the harakiri settings. This method makes use of two "private" Python functions,
        `sys._current_frames` and `os._exit`, but both of these functions are publicly documented and supported.
        """
        if not self._shutdown_lock.acquire(False):
            # Ctrl+C can result in 2 or even more signals coming in within nanoseconds of each other. We lock to
            # prevent handling them all. The duplicates can always be ignored, so this is a non-blocking acquire.
            return

        current_thread_id = threading.current_thread().ident

        threads = {t.ident: {'name': t.name, 'traceback': ['Unknown']} for t in threading.enumerate()}
        # noinspection PyProtectedMember
        for thread_id, frame in sys._current_frames().items():
            stack = []
            # If this is the current thread, we use the passed in `stack_frame` instead of the `frame` from
            # `current_frames`, so that this harakiri code is not in the logged stack trace.
            for f in traceback.format_stack(stack_frame if current_thread_id == thread_id else frame):
                stack.extend(f.rstrip().split('\n'))
            threads.setdefault(thread_id, {'name': thread_id})['traceback'] = stack

        extra = {'data': {'thread_status': {
            t['name']: [l.rstrip() for l in t['traceback']] for t in threads.values()
        }}}
        details = 'Current thread status at harakiri trigger:\n{}'.format('\n'.join((
            'Thread {}:\n{}'.format(t['name'], '\n'.join(t['traceback'])) for t in threads.values()
        )))

        try:
            self._last_signal = signal_number
            self._last_signal_received = time.time()

            if self.shutting_down:
                self.logger.error(
                    'Graceful shutdown failed {} seconds after harakiri. Exiting now!'.format(
                        self.settings['harakiri']['shutdown_grace']
                    ),
                    extra=extra,
                )
                self.logger.info(details)

                try:
                    self.metrics.counter('server.error.harakiri', harakiri_level='emergency')
                    self.metrics.commit()
                finally:
                    # We tried shutting down gracefully, but it didn't work. This probably means that we are CPU bound
                    # in lower-level C code that can't be easily interrupted. Because of this, we forcefully terminate
                    # the server with prejudice. But first, we do our best to let things finish cleanly, if possible.
                    # noinspection PyProtectedMember
                    try:
                        exit_func = getattr(atexit, '_run_exitfuncs', None)
                        if exit_func:
                            thread = threading.Thread(target=exit_func)
                            thread.start()
                            thread.join(5.0)  # don't let cleanup tasks take more than five seconds
                        else:
                            # we have no way to run exit functions, so at least give I/O two seconds to flush
                            time.sleep(2.0)
                    finally:
                        os._exit(1)
            else:
                self.logger.warning(
                    'No activity for {} seconds, triggering harakiri with grace period of {} seconds'.format(
                        self.settings['harakiri']['timeout'],
                        self.settings['harakiri']['shutdown_grace'],
                    ),
                    extra=extra,
                )
                self.logger.info(details)

                # We re-set the alarm so that if the graceful shutdown we're attempting here doesn't work, harakiri
                # will be triggered again to force a non-graceful shutdown.
                signal.alarm(self.settings['harakiri']['shutdown_grace'])

                # Just setting the shutting_down flag isn't enough, because, if harakiri was triggered, we're probably
                # CPU or I/O bound in some way that won't return any time soon. So we also raise HarakiriInterrupt to
                # interrupt the main thread and cause the service to shut down in an orderly fashion.
                self.shutting_down = True
                raise HarakiriInterrupt()
        finally:
            self._shutdown_lock.release()
Example #51
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import socket
import threading
import time

# The server must be able to tell which client each socket connection is bound to.
# A socket connection is uniquely identified by four things: server address, server port, client address, client port.
# The server handles requests from multiple clients at the same time, so each connection needs its own process or thread.

print 'thread %s is running...' % threading.current_thread().name

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Bind the listening address and port:
s.bind(('127.0.0.1', 9999))  # 127.0.0.1 is the loopback address; clients must run on the same machine to connect, external machines cannot reach it.

s.listen(5)  # start listening; the argument is the maximum number of queued connections
print "waiting for client connections..."

def tcplink(sock, addr):
    #print 'Accept new connection from %s:%s...' % addr
    sock.send('Welcome!')

    while True:  # wait for data from the client
        data = sock.recv(1024)
        time.sleep(1)  
        if data == 'exit' or not data:
            break
        sock.send('Hello, %s!' % data)
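The snippet stops before the accept loop; a hedged sketch of the part this example implies, handing each new connection to its own thread:

while True:
    # accept a new connection and serve it on a dedicated thread
    sock, addr = s.accept()
    t = threading.Thread(target=tcplink, args=(sock, addr))
    t.start()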
Example #52
def fengniao(num):
    content = get_content(START_URL.format(num))
    urls = list_page_parse(content)
    print(threading.current_thread().getName())
    for url in urls:
        download_picture(url)
Example #53
    def schedule(self, coroutine, trigger=None):
        """Schedule a coroutine by calling the send method.

        Args:
            coroutine (cocotb.decorators.coroutine): The coroutine to schedule.
            trigger (cocotb.triggers.Trigger): The trigger that caused this
                coroutine to be scheduled.
        """
        if trigger is None:
            send_outcome = outcomes.Value(None)
        else:
            send_outcome = trigger._outcome
        if _debug:
            self.log.debug("Scheduling with {}".format(send_outcome))

        coro_completed = False
        try:
            result = coroutine._advance(send_outcome)
            if _debug:
                self.log.debug("Coroutine %s yielded %s (mode %d)" %
                               (coroutine.__name__, str(result), self._mode))

        except cocotb.decorators.CoroutineComplete as exc:
            if _debug:
                self.log.debug("Coroutine {} completed with {}".format(
                    coroutine, coroutine._outcome
                ))
            coro_completed = True

        # this can't go in the else above, as that causes unwanted exception
        # chaining
        if coro_completed:
            self.unschedule(coroutine)

        # Don't handle the result if we're shutting down
        if self._terminate:
            return

        if not coro_completed:
            try:
                result = self._trigger_from_any(result)
            except TypeError as exc:
                # restart this coroutine with an exception object telling it that
                # it wasn't allowed to yield that
                result = NullTrigger(outcome=outcomes.Error(exc))

            self._coroutine_yielded(coroutine, result)

        # We do not return from here until pending threads have completed, but only
        # from the main thread, this seems like it could be problematic in cases
        # where a sim might change what this thread is.

        if self._main_thread is threading.current_thread():

            for ext in self._pending_threads:
                ext.thread_start()
                if _debug:
                    self.log.debug("Blocking from %s on %s" % (threading.current_thread(), ext.thread))
                state = ext.thread_wait()
                if _debug:
                    self.log.debug("Back from wait on self %s with newstate %d" % (threading.current_thread(), state))
                if state == external_state.EXITED:
                    self._pending_threads.remove(ext)
                    self._pending_events.append(ext.event)

        # Handle any newly queued coroutines that need to be scheduled
        while self._pending_coros:
            self.add(self._pending_coros.pop(0))
Example #54
    def _do_init(self, master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
                 conf, jsc, profiler_cls):
        self.environment = environment or {}
        # java gateway must have been launched at this point.
        if conf is not None and conf._jconf is not None:
            # conf has been initialized in JVM properly, so use conf directly. This represent the
            # scenario that JVM has been launched before SparkConf is created (e.g. SparkContext is
            # created and then stopped, and we create a new SparkConf and new SparkContext again)
            self._conf = conf
        else:
            self._conf = SparkConf(_jvm=SparkContext._jvm)
            if conf is not None:
                for k, v in conf.getAll():
                    self._conf.set(k, v)

        self._batchSize = batchSize  # -1 represents an unlimited batch size
        self._unbatched_serializer = serializer
        if batchSize == 0:
            self.serializer = AutoBatchedSerializer(self._unbatched_serializer)
        else:
            self.serializer = BatchedSerializer(self._unbatched_serializer,
                                                batchSize)

        # Set any parameters passed directly to us on the conf
        if master:
            self._conf.setMaster(master)
        if appName:
            self._conf.setAppName(appName)
        if sparkHome:
            self._conf.setSparkHome(sparkHome)
        if environment:
            for key, value in environment.items():
                self._conf.setExecutorEnv(key, value)
        for key, value in DEFAULT_CONFIGS.items():
            self._conf.setIfMissing(key, value)

        # Check that we have at least the required parameters
        if not self._conf.contains("spark.master"):
            raise Exception("A master URL must be set in your configuration")
        if not self._conf.contains("spark.app.name"):
            raise Exception("An application name must be set in your configuration")

        # Read back our properties from the conf in case we loaded some of them from
        # the classpath or an external config file
        self.master = self._conf.get("spark.master")
        self.appName = self._conf.get("spark.app.name")
        self.sparkHome = self._conf.get("spark.home", None)

        for (k, v) in self._conf.getAll():
            if k.startswith("spark.executorEnv."):
                varName = k[len("spark.executorEnv."):]
                self.environment[varName] = v

        self.environment["PYTHONHASHSEED"] = os.environ.get("PYTHONHASHSEED", "0")

        # Create the Java SparkContext through Py4J
        self._jsc = jsc or self._initialize_context(self._conf._jconf)
        # Reset the SparkConf to the one actually used by the SparkContext in JVM.
        self._conf = SparkConf(_jconf=self._jsc.sc().conf())

        # Create a single Accumulator in Java that we'll send all our updates through;
        # they will be passed back to us through a TCP server
        auth_token = self._gateway.gateway_parameters.auth_token
        self._accumulatorServer = accumulators._start_update_server(auth_token)
        (host, port) = self._accumulatorServer.server_address
        self._javaAccumulator = self._jvm.PythonAccumulatorV2(host, port, auth_token)
        self._jsc.sc().register(self._javaAccumulator)

        # If encryption is enabled, we need to setup a server in the jvm to read broadcast
        # data via a socket.
        # scala's mangled names w/ $ in them require special treatment.
        self._encryption_enabled = self._jvm.PythonUtils.getEncryptionEnabled(self._jsc)

        self.pythonExec = os.environ.get("PYSPARK_PYTHON", 'python')
        self.pythonVer = "%d.%d" % sys.version_info[:2]

        if sys.version_info < (2, 7):
            warnings.warn("Support for Python 2.6 is deprecated as of Spark 2.0.0")

        # Broadcast's __reduce__ method stores Broadcast instances here.
        # This allows other code to determine which Broadcast instances have
        # been pickled, so it can determine which Java broadcast objects to
        # send.
        self._pickled_broadcast_vars = BroadcastPickleRegistry()

        SparkFiles._sc = self
        root_dir = SparkFiles.getRootDirectory()
        sys.path.insert(1, root_dir)

        # Deploy any code dependencies specified in the constructor
        self._python_includes = list()
        for path in (pyFiles or []):
            self.addPyFile(path)

        # Deploy code dependencies set by spark-submit; these will already have been added
        # with SparkContext.addFile, so we just need to add them to the PYTHONPATH
        for path in self._conf.get("spark.submit.pyFiles", "").split(","):
            if path != "":
                (dirname, filename) = os.path.split(path)
                if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
                    self._python_includes.append(filename)
                    sys.path.insert(1, os.path.join(SparkFiles.getRootDirectory(), filename))

        # Create a temporary directory inside spark.local.dir:
        local_dir = self._jvm.org.apache.spark.util.Utils.getLocalDir(self._jsc.sc().conf())
        self._temp_dir = \
            self._jvm.org.apache.spark.util.Utils.createTempDir(local_dir, "pyspark") \
                .getAbsolutePath()

        # profiling stats collected for each PythonRDD
        if self._conf.get("spark.python.profile", "false") == "true":
            dump_path = self._conf.get("spark.python.profile.dump", None)
            self.profiler_collector = ProfilerCollector(profiler_cls, dump_path)
        else:
            self.profiler_collector = None

        # create a signal handler which would be invoked on receiving SIGINT
        def signal_handler(signal, frame):
            self.cancelAllJobs()
            raise KeyboardInterrupt()

        # see http://stackoverflow.com/questions/23206787/
        if isinstance(threading.current_thread(), threading._MainThread):
            signal.signal(signal.SIGINT, signal_handler)
Example #55
 def thread_done(self):
     if _debug:
         self._log.debug("Thread finished from %s" % (threading.current_thread()))
     self._propagate_state(external_state.EXITED)
Example #56
 def test_joining_current_thread(self):
     current_thread = threading.current_thread()
     self.assertRaises(RuntimeError, current_thread.join)
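Outside the test harness, the same behavior is easy to observe directly; joining the current thread would deadlock, so threading refuses it:

import threading

t = threading.current_thread()
try:
    t.join()  # joining yourself can never complete, so this is rejected
except RuntimeError as e:
    print('RuntimeError:', e)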
Example #57
def process_student():
    std = local_school.student
    print('Hello, %s (in %s)' %(std,threading.current_thread().name))
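process_student reads local_school.student, which only works if local_school is a threading.local and each thread stores its own student first. A hedged sketch of that setup:

import threading

local_school = threading.local()  # each thread gets an independent 'student' attribute

def process_thread(name):
    local_school.student = name   # visible only to the current thread
    process_student()

t1 = threading.Thread(target=process_thread, args=('Alice',), name='Thread-A')
t2 = threading.Thread(target=process_thread, args=('Bob',), name='Thread-B')
t1.start(); t2.start()
t1.join(); t2.join()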
Example #58
 def execute_external(func, _waiter):
     _waiter._outcome = outcomes.capture(func, *args, **kwargs)
     if _debug:
         self.log.debug("Execution of external routine done %s" % threading.current_thread())
     _waiter.thread_done()
Example #59
 def __get_thread_name(self):
     #multiprocessing.current_process().name
     return threading.current_thread().name
Example #60
 def _propagate_state(self, new_state):
     with self.cond:
         if _debug:
             self._log.debug("Changing state from %d -> %d from %s" % (self.state, new_state, threading.current_thread()))
         self.state = new_state
         self.cond.notify()