def start(self):
        """

        :return:
        """

        for _ in range(self.threads):
            t = threading.Thread(target=self.process, name=_)
            t.setDaemon(True)
            t.start()

        try:
            while threading.active_count() > 1:
                time.sleep(1)
        except KeyboardInterrupt:
            self.lock.acquire()
            self.stop = True
            print 'waiting for all sub threads to exit...'
            self.lock.release()
        finally:
            while threading.active_count() > 1:
                time.sleep(1)
            self.cursor.close()
            self.conn.close()
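
Polling threading.active_count() keeps the main thread responsive to Ctrl-C, but the same wait can be written by joining the workers; a minimal sketch, assuming Python 3 and that self.process checks the self.stop flag:

    def start(self):
        workers = []
        for i in range(self.threads):
            t = threading.Thread(target=self.process, name=str(i), daemon=True)
            t.start()
            workers.append(t)
        try:
            for t in workers:
                while t.is_alive():
                    t.join(timeout=1)  # short timeout keeps KeyboardInterrupt deliverable
        except KeyboardInterrupt:
            with self.lock:
                self.stop = True
            for t in workers:
                t.join()  # wait for workers to notice the flag and exit
        finally:
            self.cursor.close()
            self.conn.close()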
def wait_for_thread_done(report=False):
    while threading.active_count() > 1:
        check_thread_exception()
        time.sleep(1)
        if report:
            print 'thread workers: %d' % (threading.active_count() - 1)
    check_thread_exception()
Example 3
    def __init__(self, suite):
        self.timestamp = datetime.datetime.now()
        # Create work folder
        os.chdir(dkenv.DKROOT)
        self.work = make_path(os.path.join('_coverage', '_covroot'))
        os.chdir(self.work)
        self.cache = make_path('cache')
        os.chdir(self.cache)

        running = []

        def show_running():
            current = list(sorted(set(int(t.name.split('-')[1])
                                      for t in threading.enumerate()
                                      if '-' in t.name)))
            if running != current:
                print len(current), current
                running[:] = current

        for i, tfile in enumerate(suite):
            show_running()
            while psutil.cpu_percent(.5) > 85 or threading.active_count() > 60:
                time.sleep(.5)

            threading.Thread(
                target=tfile.run_test,
                args=(self,)
            ).start()

        while 1:
            n = threading.active_count()
            if n == 1:
                break
            time.sleep(1)
            show_running()
def run_tcp_server(host='', port=21567):
    server_addr = (host, port)
    tcp_server_socket = skt.socket(skt.AF_INET, skt.SOCK_STREAM) 
    tcp_server_socket.bind(server_addr)
    tcp_server_socket.listen(5)
    client_skt_addr_dict = {}
    print 'waiting for connection...'

    try:
        while True:
            tcp_client_socket, cli_addr = tcp_server_socket.accept()
            t = MyThread(tcp_client_socket, cli_addr)
            client_skt_addr_dict[(tcp_client_socket, cli_addr)] = t
            t.start()
            print '=' * 20
            for d in client_skt_addr_dict:
                print client_skt_addr_dict[d]

    except (KeyboardInterrupt, EOFError), err:
        print 'got error,cnt=', err, threading.active_count()
        for d in client_skt_addr_dict:
            t = client_skt_addr_dict[d]
            print 'cnt=', threading.active_count()
            t.quit()

        tcp_client_socket.close()
        tcp_server_socket.close()
        print client_skt_addr_dict, threading.active_count()
def main():
	global xtime
	global success
	global fail
	success=0
	fail=0
	xtime=time.strftime("%Y-%m-%d[%H.%M.%S]")
	print xtime
	MaxThreads=sys.argv[1]
	MaxThreads=int(MaxThreads)
	mythreads = Queue.Queue(maxsize = 0) 
	for i in open("u.txt").readlines():#先将所有线程装入队列,等待取出
		i=i.strip('\n')
		t=threading.Thread(target=post, args=(i,))
		t.setDaemon(True)
		mythreads.put(t)
	print 'Total Threads:%d' %MaxThreads
	print 'Total URLs:%d' %mythreads.qsize()
	time.sleep(2)
	while True:# loop forever until one of the conditions below is met
		if(threading.active_count() == 1 and mythreads.qsize() == 0): # exit when no URLs remain and only the main thread is left #2015-9-28 00:43 Fixed
			print 'Done at %s' %time.strftime("%Y-%m-%d[%H.%M.%S]")
			break
		elif(threading.active_count() < MaxThreads): # if fewer threads are running than the input value, keep adding threads
			if (mythreads.qsize() ==0 ): # if no URLs remain, stop reading from the queue (otherwise it blocks forever) and update the window title to tell the user #2015-9-28 20:15 Fixed
				os.system("title No URL left,waiting to exit,Current threads: %d,Success:%d,Failed:%d" %(threading.active_count(),success,fail))
				time.sleep(60) # after 60 seconds, return to the first check to see whether all threads have finished
			else:
				os.system("title Current threads: %d,URLs left: %d,Success:%d,Failed:%d" %(threading.active_count(),mythreads.qsize(),success,fail)) # update the window title; comment out if it uses too much CPU #thread/URL counts only change when a thread starts 2015-10-2 22:30 Fixed
				t=mythreads.get() # take one thread from the queue
				t.start() # start that thread
				t.join(1) # block for one second before starting the next; comment out if you don't want to wait
	print 'Success:%d,Failed:%d' %(success,fail)
Example 6
    def run(self):
        """
        This method executes the actual download.  It calls the parse_remote_dir_tree method and waits for
        it to return.  Once it returns, it waits until all download threads have finished, printing a list
        of the files currently being downloaded while it waits.
        :return:
        """

        print('[+] Starting Parse And Download Of ' + self.download_url + '\n')

        self.parse_remote_dir_tree(self.download_url, '')

        print('\nAll Download Threads Launched.\n')
        # TODO: Move this thread-status check into its own method
        last_active = 0
        while threading.active_count() > 1:
            if last_active != threading.active_count():
                self.clear_screen()
                print('---------- ACTIVE DOWNLOAD THREADS ----------')
                print('The Following ', threading.active_count() - 1, ' files are still downloading')
                for thrd in threading.enumerate():
                    if thrd.name.lower() == 'mainthread':
                        continue
                    print('[+] ', thrd.name)
            last_active = threading.active_count()
            time.sleep(1)

        print('[!] Success: All Downloads Have Finished')
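
The TODO above suggests factoring the monitor loop into its own method; a minimal sketch of that refactor, assuming the same clear_screen helper exists on the class:

    def report_active_downloads(self, last_active):
        """Redraw the list of live download threads whenever the count changes."""
        current = threading.active_count()
        if last_active != current:
            self.clear_screen()
            print('---------- ACTIVE DOWNLOAD THREADS ----------')
            print('The Following ', current - 1, ' files are still downloading')
            for thrd in threading.enumerate():
                if thrd.name.lower() != 'mainthread':
                    print('[+] ', thrd.name)
        return current

The while loop in run then reduces to last_active = self.report_active_downloads(last_active) followed by time.sleep(1).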
Example 7
    def percentage_fancy(self):

        if not DNSreverse._percentage_bound:
            DNSreverse._percentage_bound = DNSreverse.ip_amount / 10.0
            if not int(DNSreverse._percentage_bound):
                DNSreverse._percentage_bound = 1.0

        if not DNSreverse.ip_done:
            return

        sync_on_disk = False
        if not DNSreverse.ip_done % int(DNSreverse._percentage_bound):
            print "%d\t%d%%\t%s\tT%d" % (DNSreverse.ip_done,
                                    (DNSreverse.ip_done * (10 / DNSreverse._percentage_bound) ),
                                    time.ctime(), threading.active_count())
            sync_on_disk = True

        # other random possibility based on birthday paradox to show counters...
        if random.randint(0, int(DNSreverse._percentage_bound * 10 )) == DNSreverse.ip_done:
            print "%d\t%d%%\t%s\tT%d" % (DNSreverse.ip_done,
                                    (DNSreverse.ip_done * (10 / DNSreverse._percentage_bound) ),
                                    time.ctime(), threading.active_count())
            sync_on_disk = True

        if sync_on_disk:
            DNSreverse.save_status(mandatory=False)
Example 8
def test_pluginmanager_constructor():
    threads = threading.active_count()
    pplugins.PluginManager()

    # plugin manager should not have started the reaping thread when called
    # through the constructor
    assert threading.active_count() == threads
Example 9
def loop():
    
    while True:
        print threading.current_thread().name
        print threading.active_count()
        print '%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%'
        if len(iidbuf) != 0:
            print 10000000-len(iidbuf)
            iid= iidbuf[0]
            iidbuf.pop(0)
            print iid
            
            try:
                if imdb_ids.find_one({'imdbID': iid}) is None:
                    url='http://www.imdb.com/title/'+iid+'/'
                    req = urllib2.Request(url, headers={ 'User-Agent': 'Mozilla/5.0' })
                    cont = urllib2.urlopen(req).read()
                    soup = BeautifulSoup(cont, "lxml")
                    data={}
                    data['imdbID']=iid
                    data['html_full']=cont
                    imdb_ids.insert_one(data)
                    print bcolors.OKGREEN+ "ADDED" + bcolors.ENDC
            except:
                print '!!!'
            end=time.time()
            print bcolors.OKBLUE+str((end-start)/60) + "M" + bcolors.ENDC
Example 10
 def test_retransmission_queue_empty_when_timer_expires(self):
     fake_rto = 100
     size = 5
     data = self.DEFAULT_DATA[:size]
     ack_number = self.DEFAULT_ISS + size
     ack_packet = self.packet_builder.build(flags=[ACKFlag],
                                            seq=self.DEFAULT_IRS,
                                            ack=ack_number,
                                            window=self.DEFAULT_IW)
     rqueue = self.socket.protocol.rqueue
     rto_estimator = self.socket.protocol.rto_estimator
     timer = self.socket.protocol.retransmission_timer
     rto_estimator.rto = fake_rto
     thread_count = threading.active_count()
     # Send some data. This will enqueue the packet.
     self.socket.send(data)
     self.receive()
     # Now remove it from rqueue. This is quite ugly but it has to
     # be done this way.
     snd_una = self.socket.protocol.control_block.get_snd_una()
     snd_nxt = self.socket.protocol.control_block.get_snd_nxt()
     rqueue.remove_acknowledged_by(ack_packet, snd_una, snd_nxt)
     
     self.assertTrue(rqueue.empty())
     self.assertTrue(timer.is_running())
     
     # Wait until timer expires.
     time.sleep(2*fake_rto*CLOCK_TICK)
     
     # Check that we have the same number of threads (if the packet sender
     # crashed, it will be less).
     self.assertEquals(thread_count, threading.active_count())
Example 11
def test_kafka_producer_gc_cleanup():
    threads = threading.active_count()
    producer = KafkaProducer(api_version='0.9') # set api_version explicitly to avoid auto-detection
    assert threading.active_count() == threads + 1
    del(producer)
    gc.collect()
    assert threading.active_count() == threads
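
The baseline-count pattern above generalizes to any object that owns a background thread; a minimal sketch, assuming a factory callable whose product starts exactly one thread and stops it once garbage collected:

import gc
import threading

def assert_no_thread_leak(factory):
    baseline = threading.active_count()  # record the count before creating the object
    obj = factory()
    assert threading.active_count() == baseline + 1
    del obj          # drop the last reference...
    gc.collect()     # ...and force collection so the thread winds down
    assert threading.active_count() == baseline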
 def test_initializes_with_active_workers(self):
     original_thread_count = threading.active_count()
     with Pool(3) as pool:
         pool_thread_count = threading.active_count() - original_thread_count
         eq_(3, pool_thread_count)
         eq_(3, pool.size)
         eq_(3, len(pool.workers))
Example 13
 def event_chessboardClick(self, sq):
     print("ANZ THREADS", active_count())
     if type(self.players[self.game.playerTurn]) is players.Human:
         self.players[self.game.playerTurn].processInput(self.game, sq)
     else:
         if active_count() == 1:
             Thread(target=self.players[self.game.playerTurn].doMove, args=(copy.deepcopy(self.game),)).start()
Example 14
    def __init__(self):
        self.points = 60
        self.living = True

        self.server = Server()
        self.spi = Spi(self.points)
        self.queueSize = 20
        self.proccessQueue = queue.Queue(self.queueSize)
        self.oscWindow_1 = []

        self.trigger = Trigger(1,-5)

        #Thread to handle reading from SPI then writing to Server
        spiThread = threading.Thread(target = self.spiRead)
        spiThread.name = "SPI_Thread"
        spiThread.daemon = True    #Kill off on its own
        spiThread.start()

        #Thread to handle reading from Server then writing to SPI
        serverThread = threading.Thread(target = self.serverRead)
        serverThread.name = "SERVER_Thread"
        serverThread.daemon = True
        serverThread.start()

        print(threading.active_count())
        for thrd in threading.enumerate():
            if(thrd.isDaemon()):
                print(thrd)

        while(self.living):
            x= 0
        print(threading.active_count())
        for thrd in threading.enumerate():
            if(thrd.isDaemon()):
                print(thrd)
Example 15
 def active_thread_counter():
     with self.thread_watcher.register(u'thread counter'):
         current_count = threading.active_count()
         while True:
             if current_count != threading.active_count():
                 current_count = threading.active_count()
                 self.emit(SIGNAL('activeThreadCountChanged(int)'), current_count)
Example 16
def db(threads=2):
    """Continuously update the databases."""

    def oldest_first(mode):
        idx = index.read()
        return sorted(idx[mode], key=lambda x: idx[mode][x]["timestamp"])

    def thread(mode, stop, lock):
        # the interval is used to prevent hammering the DJMAX site.  it sets the
        # number of seconds to wait before downloading score data again.
        if mode == game.mode.star:
            interval = 900
        elif mode == game.mode.pop:
            interval = 600
        elif mode == game.mode.club:
            interval = 1200
        elif mode == game.mode.mission:
            interval = 1800
        else:
            raise ValueError("Invalid game mode")
        while not stop.is_set():
            with lock:
                next = names[mode].pop(0)
            try:
                database.create(mode, next)
            except:
                names[mode].insert(0, next)
                stop.set()
                raise
            else:
                with lock:
                    names[mode].append(next)
                    index.touch(mode, next)
                stop.wait(interval)

    names = {}
    names[game.mode.star] = oldest_first(game.mode.star)
    names[game.mode.pop] = oldest_first(game.mode.pop)
    names[game.mode.club] = oldest_first(game.mode.club)
    names[game.mode.mission] = oldest_first(game.mode.mission)

    stops = []
    for mode in (game.mode.star, game.mode.pop, game.mode.club, game.mode.mission):
        lock = threading.Lock()
        for t in range(threads):
            stop = threading.Event()
            threading.Thread(target=thread, args=(mode, stop, lock)).start()
            stops.append(stop)
    try:
        while threading.active_count() > 1:
            print("{} of {} threads running.".format(threading.active_count() - 1, threads * 4), end="\r")
            time.sleep(30)
    except KeyboardInterrupt:
        for stop in stops:
            stop.set()
        print("Finishing current jobs.  Please wait...")
        while threading.active_count() > 1:
            time.sleep(2)
    finally:
        print("Done.")
def test():
    global session_uuid
    global session_to
    global session_mc
    vm_num = os.environ.get('ZSTACK_TEST_NUM')
    if not vm_num:
        vm_num = 0
    else:
        vm_num = int(vm_num)

    test_util.test_logger('ZSTACK_THREAD_THRESHOLD is %d' % thread_threshold)
    test_util.test_logger('ZSTACK_TEST_NUM is %d' % vm_num)

    org_num = vm_num
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_s')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    l3_name = os.environ.get('l3VlanNetworkName1')
    conditions = res_ops.gen_query_conditions('name', '=', l3_name)
    l3_uuid = res_ops.query_resource_with_num(res_ops.L3_NETWORK, conditions, \
            session_uuid, start = 0, limit = 1)[0].uuid
    vm_creation_option.set_l3_uuids([l3_uuid])
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    session_uuid = acc_ops.login_as_admin()

    #change account session timeout. 
    session_to = con_ops.change_global_config('identity', 'session.timeout', '720000', session_uuid)
    session_mc = con_ops.change_global_config('identity', 'session.maxConcurrent', '10000', session_uuid)

    vm_creation_option.set_session_uuid(session_uuid)

    vm = test_vm_header.ZstackTestVm()
    random_name = random.random()
    vm_name = 'multihost_basic_vm_%s' % str(random_name)
    vm_creation_option.set_name(vm_name)

    while vm_num > 0:
        check_thread_exception()
        vm.set_creation_option(vm_creation_option)
        vm_num -= 1
        thread = threading.Thread(target=create_vm, args=(vm,))
        while threading.active_count() > thread_threshold:
            time.sleep(1)
        thread.start()

    while threading.active_count() > 1:
        time.sleep(0.01)

    cond = res_ops.gen_query_conditions('name', '=', vm_name)
    vms = res_ops.query_resource_count(res_ops.VM_INSTANCE, cond, session_uuid)
    con_ops.change_global_config('identity', 'session.timeout', session_to, session_uuid)
    con_ops.change_global_config('identity', 'session.maxConcurrent', session_mc, session_uuid)
    acc_ops.logout(session_uuid)
    if vms == org_num:
        test_util.test_pass('Create %d VMs Test Success' % org_num)
    else:
        test_util.test_fail('Create %d VMs Test Failed. Only find %d VMs.' % (org_num, vms))
Example 18
def main():
    assert len(sys.argv) == 2
    urlc = urlsplit(sys.argv[1], scheme='owserver', allow_fragments=False)
    host = urlc.hostname or 'localhost'
    port = urlc.port or 4304
    assert not urlc.path or urlc.path == '/'

    proxy = protocol.proxy(host, port, verbose=False)
    pid = -1
    ver = ''
    try:
        pid = int(proxy.read('/system/process/pid'))
        ver = proxy.read('/system/configuration/version').decode()
    except protocol.OwnetError:
        pass
    log("{0}, pid={1:d}, {2}".format(proxy, pid, ver))

    delta = 60
    assert threading.active_count() == 1
    started = 0
    while threading.active_count() == started + 1:
        th = threading.Thread(target=worker, args=(proxy, started, ), )
        th.start()
        started += 1
        try:
            sleep(delta)
        except KeyboardInterrupt:
            break
    log('started {0:d} worker threads'.format(started))
    log('still waiting for {0:d} worker threads'
        .format(threading.active_count()-1))
Example 19
    def test_sleep_n_book_on_date_in_thread_right_now(self):
        s_today = datetime.now().strftime('%Y%m%d')
        # all_args = [(['210106198404304617', 'chen84430mo', '1'],
        #              {'book_date': datetime.strptime(s_today, '%Y%m%d'), 'time_period': 'Morning'}),
        #             (['230107198706211520', '0621', '1'],
        #              {'book_date': datetime.strptime(s_today, '%Y%m%d'), 'time_period': 'Morning'}),
        #             (['130221198312055114', '1205', '2'],
        #              {'book_date': datetime.strptime(s_today, '%Y%m%d'), 'time_period': 'Morning'}),
        # ]
        all_args = [(['130221198312055114', '1205', '1'],
                     {'book_date': datetime.strptime(s_today, '%Y%m%d'), 'time_period': 'Morning'}),
                    (['230107198706211520', '0621', '2'],
                     {'book_date': datetime.strptime(s_today, '%Y%m%d'), 'time_period': 'Morning'}),
                    ]

        def worker(*args, **kwargs):
            exector = SleepingExecutorWithoutBooking(*args, **kwargs)
            server_time = exector.get_server_time()
            self.next_n_minutes(exector, server_time, func_get_book_minute=lambda e: e + 3)
            exector.sleep_n_book_on_date()
            with open(args[0], 'w') as f:
                f.write('Thread {0} has done'.format(args[0]))


        working_threads = [Thread(target=worker, args=args, kwargs=kwargs) for args, kwargs in all_args]

        existed_threads = active_count()
        for a_thread in working_threads:
            print(a_thread.is_alive())
            rslt = a_thread.start()
            print(a_thread.is_alive())

        while active_count() > existed_threads:
            print('{0} threads are running'.format(active_count()))
            time.sleep(10)
Example 20
def main():
  global args
  parser = argparse.ArgumentParser(description='This is my description')

  parser.add_argument('-r', '--radar-name', help="name of radar, used as topic string /adsb/<radar>/json", default='radar')
  parser.add_argument('-m', '--mqtt-host', help="MQTT broker hostname", default='127.0.0.1')
  parser.add_argument('-p', '--mqtt-port', type=int, help="MQTT broker port number (default 1883)", default=1883)
  parser.add_argument('-u', '--mqtt-user', help="MQTT broker user")
  parser.add_argument('-a', '--mqtt-password', help="MQTT broker password")
  parser.add_argument('-H', '--dump1090-host', help="dump1090 hostname", default='127.0.0.1')
  parser.add_argument('-P', '--dump1090-port', type=int, help="dump1090 port number (default 30003)", default=30003)
  parser.add_argument('-v', '--verbose',  action="store_true", help="Verbose output")
  parser.add_argument('-bdb', '--basestationdb', help="BaseStation SQLite DB (download from http://planebase.biz/bstnsqb)")
  parser.add_argument('-mdb', '--myplanedb', help="Your own SQLite DB with the same structure as BaseStation.sqb where you can add planes missing from BaseStation db")

  args = parser.parse_args()

  signal.signal(signal.SIGINT, signal_handler)
  if args.verbose:
    loggingInit(logging.DEBUG)
  else:
    loggingInit(logging.INFO)

  mqttConnect()
  adsbConnect()

  numThreads = threading.active_count()
  while numThreads == threading.active_count():
    time.sleep(0.1)
  log.critical("Exiting")
Example 21
    def submit_all(self, samples, **kwargs):
        if self.testing:
            print 'in testing mode, so only doing one sample at a time.'
            for sample in samples:
                self.submit(sample, **kwargs)
            return
            
        results = {}
        threads = []
        print 'launching threads (max %s)...' % self.max_threads
        def threadable_fcn(sample):
            results[sample.name] = self.submit(sample, **kwargs)
        for sample in samples:
            while threading.active_count() - 1 >= self.max_threads:
                time.sleep(0.1)
            thread = threading.Thread(target=threadable_fcn, args=(sample,))
            threads.append((sample,thread))
            thread.start()
        sleep_count = 0
        while threading.active_count() > 1:
            if sleep_count % 150 == 3:
                print 'waiting for these threads:', ' '.join(s.name for s,t in threads if t.is_alive())
            sleep_count += 1
            time.sleep(0.1)
        print 'done waiting for threads!'

        os.system('mkdir -p /tmp/%s' % self.username)
        log_fn = os.path.join(self.git_status_dir, 'crabsubmitter.log')
        log = open(log_fn, 'wt')
        for name in sorted(results.keys()):
            log.write('*' * 250)
            log.write('\n\n%s\n' % name)
            log.write(results[name])
            log.write('\n\n')
        print 'log fn is', log_fn
    def listen(self):
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind(("0.0.0.0", self.port))
        self.socket.listen(100)

        logging.info("Listening on port %d", self.port)

        self.running = True
        while self.running:
            # Wait for incoming connections.
            try:
                connection, address = self.socket.accept()

                # Check if we reach our thread limit.
                while threading.active_count() >= self.max_threads:
                    time.sleep(0.1)

                # Spawn a handler thread.
                thread = RequestHandlerThread(self, connection, address, self.choose_node())
                self.threads.append(thread)
                thread.start()
            except KeyboardInterrupt:
                logging.debug("Shutting down due to keyboard interrupt")
                break

        # Clean up any remaining idle threads.
        logging.debug("Shutting down %d active threads", threading.active_count())
        for thread in self.threads:
            if thread.is_alive():
                thread.join()

        self.socket.close()
Example 23
    def run(self):

        db = MySQLdb.connect(host=PsikonOptions.TASK_DB_HOST, user=PsikonOptions.TASK_DB_USER, passwd=PsikonOptions.TASK_DB_PASS, db=PsikonOptions.TASK_DB_NAME)
        counter = 0

        while True:

            counter = counter + 1
           
            totalThreads = threading.active_count() - 1 
            if totalThreads < PsikonOptions.THREAD_LIMIT:

                realLimit = PsikonOptions.THREAD_LIMIT - totalThreads

                cur = db.cursor()
                cur.execute("""SELECT id FROM sys_tasks WHERE task_status=%s LIMIT %s""", (BlogTask.STATUS_PENDING, realLimit))
                
                while True:
                    row = cur.fetchone()
                    if row is None:
                        break
                    logger.info("New task ID: " + str(row[0]))
                    TaskThread(row[0]).start()
                    
            
            if counter == 10:
                logger.info("Threads: " + str(threading.active_count()))
                counter = 0

            time.sleep(1)
Example 24
def run(target_amount):
    if target_amount < 1:
        target_amount = 10

    ip_list = ip_getter.read_from_file()
    if len(ip_list) < 1:
        return

    result_queue = Queue.PriorityQueue()
    while(result_queue.qsize() < target_amount):
        cur_ip = get_random_ip_from_list(ip_list)
        print 'checking %s...' % cur_ip
        threading.Thread(target=do_test, args=(cur_ip, result_queue)).start()
        while(threading.active_count() > k_max_thread_count):
            time.sleep(2)

    while(threading.active_count() > 1):
        time.sleep(2)
    
    result_list = []
    while not result_queue.empty():
        c_ip = result_queue.get()
        c_info = c_ip.info()
        result_list.append(c_info[0])
        print "ip:%s delay:%d" % (c_info[0], c_info[1])

    for i in result_list:
        sys.stdout.write(i+'|')
Example 25
def scrape_all_disease_contents_once(disease_db):
    common.get_logger().warning("Scraping all disease contents once more...")
    main_thread = threading.currentThread()
    disease_names = list(disease_db[common.ALL_DISEASES_VIEW].keys())
    n_diseases = len(disease_names)
    i_disease = 0
    is_still_need_to_scrape = False
    # create threads to scrape disease contents
    while (i_disease < n_diseases):
        if (threading.active_count() - 1 < common.MAX_THREADS) and \
            (i_disease < n_diseases):
            disease_name = disease_names[i_disease]
            the_disease = disease_db[common.ALL_DISEASES_VIEW][disease_name]
            if not the_disease.is_already_scraped:
                is_still_need_to_scrape = True
                t = threading.Thread(target=scrape_all_contents_of_a_disease,
                                        args=(the_disease, i_disease, n_diseases))
                t.start()
            i_disease += 1

        if threading.active_count() - 1 >= common.MAX_THREADS:
            time.sleep(common.TIMEOUT_WAIT_THREAD_FINISH)

    # wait for all the threads to finish
    while (threading.active_count() > 1):
        time.sleep(common.TIMEOUT_WAIT_THREAD_FINISH)

    return is_still_need_to_scrape
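
Throttling by polling active_count() against common.MAX_THREADS works, but the same cap can be expressed with a thread pool; a minimal sketch, assuming the same scrape_all_contents_of_a_disease signature and common.MAX_THREADS:

from concurrent.futures import ThreadPoolExecutor

def scrape_once_with_pool(disease_db):
    diseases = disease_db[common.ALL_DISEASES_VIEW]
    pending = [d for d in diseases.values() if not d.is_already_scraped]
    n_diseases = len(pending)
    # The pool enforces the MAX_THREADS cap; leaving the with-block
    # waits for every submitted task to finish.
    with ThreadPoolExecutor(max_workers=common.MAX_THREADS) as pool:
        for i, disease in enumerate(pending):
            pool.submit(scrape_all_contents_of_a_disease, disease, i, n_diseases)
    return bool(pending)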
Example 26
    def run(self):
        """
        This method creates a pool of threads, starts them, and waits for the
        'input_queue' to be empty before asking them to stop.
        Results, if any, will be available in the 'output_queue'.
        """
        self._set_pool_size()
        
        log.debug('Active threads: %d' % threading.active_count()) #@UndefinedVariable
        
        # Create threads and add them to the pool
        for i in range(self.pool_size): #@UnusedVariable
            thread = self.thread_class(self.in_queue, self.out_queue,
                                       **self._thread_args)
            thread.name = 'Worker-%02d' % i
            self._thread_pool.append(thread)
            thread.start()
        
        log.debug('Active threads: %d' % threading.active_count()) #@UndefinedVariable
        
        # Wait for the threads to process all the clients in the queue
        while not self.in_queue.empty():
            pass

        # Ask threads to stop
        for thread in self._thread_pool:
            thread.join()
        self.finished = True
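
The busy-wait on in_queue.empty() above spins a CPU core and can return while items are still being processed; a minimal sketch of the same pool using Queue.task_done and Queue.join, with a hypothetical handle callable standing in for the worker body:

import queue
import threading

def run_pool(in_queue, handle, pool_size=4):
    def worker():
        while True:
            item = in_queue.get()
            try:
                handle(item)
            finally:
                in_queue.task_done()  # lets join() observe real completion

    for i in range(pool_size):
        t = threading.Thread(target=worker, name='Worker-%02d' % i, daemon=True)
        t.start()
    in_queue.join()  # blocks until every queued item has been processed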
def error_cleanup():
    global vm
    global schds

    for schd_job in schd_jobs:
        thread = threading.Thread(target=delete_scheduler_job, args=(schd_job.uuid, ))
        while threading.active_count() > 10:
            time.sleep(0.5)
        exc = sys.exc_info()
        thread.start()

    while threading.active_count() > 1:
        exc = sys.exc_info()
        time.sleep(0.1)

    for schd_trigger in schd_triggers:
        thread = threading.Thread(target=delete_scheduler_trigger, args=(schd_trigger.uuid, ))
        while threading.active_count() > 10:
            time.sleep(0.5)
        exc = sys.exc_info()
        thread.start()

    while threading.active_count() > 1:
        exc = sys.exc_info()
        time.sleep(0.1)

    if vm:
        try:
            vm.destroy()
        except:
            test_util.test_logger('expected exception when destroy VM since too many queued task')
Example 28
    def test_active_count_py2(self):
        self.assertEqual(threading.active_count(), 1)

        class TestingThread(threading.Thread):
            def __init__(self, _condition):
                super(TestingThread, self).__init__()
                self.__condition = _condition

            def run(self):
                self.__condition.acquire()
                try:  # also can use 'with self.__condition'
                    self.__condition.wait()
                finally:
                    self.__condition.release()

        condition = threading.Condition()
        thread = TestingThread(condition)
        thread.start()
        self.assertEqual(threading.active_count(), 2)

        condition.acquire()
        try:  # also can use 'with self.condition'
            condition.notify()
        finally:
            condition.release()

        thread.join()
Example 29
def test_thread_count():
    """
    Test the number of threads created
    """
    before_num = threading.active_count()

    server = APNSProxyServer(dummy_setting)
    server.create_workers({
        "myApp1": {
            "application_id": "myApp1",
            "name": "My App1",
            "sandbox": False,
            "cert_file": "sample.cert",
            "key_file": "sample.key"
        },
        "myApp2": {
            "application_id": "myApp2",
            "name": "My App2",
            "sandbox": False,
            "cert_file": "sample.cert",
            "key_file": "sample.key"
        },
    }, 3)

    after_num = threading.active_count()

    eq_(before_num + 6, after_num)
Example 30
def main():
    # To avoid the bug of strptime in multiple threads, I invoke strptime before everything else
    # For more details, please refer to this: http://bugs.python.org/issue7980
    datetime.strptime('20141219', '%Y%m%d')

    def worker(*args, **kwargs):
        exector = SleepingExecutor(*args, **kwargs)
        exector.sleep_n_book_on_date()

    def fake_worker(*args, **kwargs):
        exector = SleepingExecutorWithoutBooking(*args, **kwargs)
        exector.sleep_n_book_on_date()

    all_args = [(worker, ['210106198404304617', 'chen84430mo', '2'], {'time_period': 'Morning'}),
                (worker, ['230107198706211520', '0621', '2'], {'time_period': 'Morning'}),
                #(worker, ['130221198312055114', '1205', '2'], {'time_period': 'Morning'})
    ]

    working_threads = [Thread(target=func, args=args, kwargs=kwargs) for func, args, kwargs in all_args]

    existed_threads = active_count()
    print('Before starting, {0} threads are running'.format(existed_threads))
    for a_thread in working_threads:
        rslt = a_thread.start()
        print(a_thread.is_alive())

    while active_count() > existed_threads:
        print('{0} threads are running'.format(active_count()))
        time.sleep(60)
def main():
    add_threading = threading.Thread(target=thread_job)  # add a thread
    add_threading.start()
    print(threading.active_count())  # number of active threads
    print(threading.enumerate())  # the active threads
    print(threading.current_thread())  # the current thread
Example 32
import threading
from time import sleep


class MyThread1(threading.Thread):
    def run(self):
        for i in range(3):
            print("singing")
            sleep(1)

class MyThread2(threading.Thread):
    def run(self):
        for i in range(3):
            print("dancing")
            sleep(1)

t1 = MyThread1()
t1.start()
t2 = MyThread2()
t2.start()
# print all threads
print(threading.enumerate())
print(threading.active_count())
print(threading.current_thread())
print(threading.main_thread())
Example 33
def job_start_worker(p_engine, jobname, envname, tgt_connector,
                     tgt_connector_env, nowait, parallel, monitor,
                     joblist_class):
    """
    Start job
    param1: p_engine: engine name from configuration
    param2: jobname: job name to list
    param3: envname: environment name
    param4: tgt_connector: target connector for multi tenant
    param5: tgt_connector_env: target connector environment for multi tenant
    param6: nowait: no wait for job to complete
    param7: parallel: number of concurrent masking jobs
    param8: monitor: enable progress bar
    param9: joblist_class - DxJobsList or DxProfileJobsList
    return 0 if environment found
    """

    job_list = [x for x in jobname]
    jobsno = len(job_list)

    posno = 1
    no_of_active_threads = 1

    logger = logging.getLogger()

    logger.debug("parallel % s active count %s" % (parallel, active_count()))

    if monitor:
        no_of_active_threads = 2
        jobsbar = tqdm(total=jobsno,
                       desc="No of started jobs",
                       bar_format="{desc}: |{bar}| {n_fmt}/{total_fmt}")
        time.sleep(1)

    while len(job_list) > 0:

        ac = parallel - active_count() + no_of_active_threads

        logger.debug("parallel % s ac %s active count %s" %
                     (parallel, ac, active_count()))

        try:
            for i in range(1, ac + 1):
                try:
                    single_jobname = job_list.pop()
                    logger.debug("starting job %s" % single_jobname)
                    t = Thread(target=job_selector,
                               kwargs={
                                   'p_engine': p_engine,
                                   'jobname': single_jobname,
                                   'envname': envname,
                                   'function_to_call': 'do_start',
                                   'tgt_connector': tgt_connector,
                                   'tgt_connector_env': tgt_connector_env,
                                   'nowait': nowait,
                                   'posno': posno,
                                   'lock': lock,
                                   'monitor': monitor,
                                   'joblist_class': joblist_class
                               })
                    t.start()
                    posno = posno + 1
                    logger.debug("before update")
                    time.sleep(1)
                    if monitor:
                        jobsbar.update(1)
                        logger.debug("after update ")

                except IndexError:
                    pass
        except Exception:
            print_error("Error: unable to start thread")

        # wait 1 sec before kicking off next job
        time.sleep(1)

    # Wait for all threads to complete
    while (active_count() > no_of_active_threads):
        logger.debug("waiting for threads - active count %s" % active_count())
        time.sleep(1)

    logger.debug("all threds finished %s" % active_count())

    if monitor:
        jobsbar.close()

    logger.debug("After close")
    print_message("\n" * posno)

    if joblist_class == "DxJobsList":
        return dxm.lib.DxJobs.DxJobCounter.ret
    else:
        return dxm.lib.DxJobs.DxJobCounter.profileret
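
Computing free slots from parallel - active_count() is fragile when unrelated threads exist (hence the no_of_active_threads fudge above); a minimal sketch of the same cap using a BoundedSemaphore, with a hypothetical run_one callable standing in for job_selector:

import threading

def start_jobs(job_list, parallel, run_one):
    slots = threading.BoundedSemaphore(parallel)
    threads = []

    def wrapped(job):
        try:
            run_one(job)
        finally:
            slots.release()  # free the slot even if the job raises

    for job in job_list:
        slots.acquire()  # blocks while `parallel` jobs are already running
        t = threading.Thread(target=wrapped, args=(job,))
        t.start()
        threads.append(t)
    for t in threads:
        t.join()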
Example 34
 def on_move(self, x, y):
     if threading.active_count() == 2:
         self.listener_mouse.stop()
     self.logging.info("{0}|{1}|None|None|Move|".format(x, y) +
                       self.get_foreground_window_title())
Example 35
import os
import threading

print(f'Python process running with process id: {os.getpid()}')

total_threads = threading.active_count()
thread_name = threading.current_thread().name

print(f'Python is currently running {total_threads} thread(s)')
print(f'The current thread is {thread_name}')
Example 36
        else:
            print('unexpected fail')


num = 0
print(Fore.RED + '''
.d88b.                                       
8P  Y8 8d8b. .d88b .d8b .d88 8d8b.d8b. .d88b 
8b  d8 8P Y8 8.dP' 8    8  8 8P Y8P Y8 8.dP' 
`Y88P' 8   8 `Y88P `Y8P `Y88 8   8   8 `Y88P 
                                            ''')

print(Style.RESET_ALL)
threadsnum = input("Threads :")
while 1:
    if threading.active_count() < int(threadsnum):
        if len(User) > num:
            #                   randomproxy = proxys3[random.randint(1,len(proxys3))]
            #                   proxsel = {
            #                       'http': 'http://'+randomproxy,
            #                       'https': 'https://'+randomproxy
            #                       }
            threading.Thread(target=d1, args=(User[num], Pass[num])).start()
            num += 1
            #print('Unscanned user count =', kalan)
            #kalan -= 1
        else:
            print(Fore.GREEN +
                  '°°°·.°·..·°¯°·._.· 𝔹𝕚𝕥𝕥𝕚 𝕔𝕒𝕟𝕚𝕞 ·._.·°¯°·.·° .·°°°')
            print(Style.RESET_ALL)
            time.sleep(3)
def do_channels_search(item):
    logger.info("streamondemand.channels.buscadorall do_channels_search")

    tecleado, category, title_year = item.extra.split('{}')

    try:
        title_year = int(title_year)
    except:
        title_year = 0

    itemlist = []

    channels_path = os.path.join(config.get_runtime_path(), "channels",
                                 '*.xml')
    logger.info("streamondemand.channels.buscador channels_path=" +
                channels_path)

    channel_language = config.get_setting("channel_language")
    logger.info("streamondemand.channels.buscador channel_language=" +
                channel_language)
    if channel_language == "":
        channel_language = "all"
        logger.info("streamondemand.channels.buscador channel_language=" +
                    channel_language)

    progreso = platformtools.dialog_progress_bg(NLS_Looking_For %
                                                urllib.unquote_plus(tecleado))

    channel_files = sorted(glob.glob(channels_path))

    search_results = Queue.Queue()
    completed_channels = 0
    number_of_channels = 0

    start_time = int(time.time())

    for infile in channel_files:

        basename_without_extension = os.path.basename(infile)[:-4]

        channel_parameters = channeltools.get_channel_parameters(
            basename_without_extension)

        # Skip inactive channels
        if channel_parameters["active"] != True:
            continue

        # When searching by category
        if category and category not in channel_parameters["categories"]:
            continue

        # Skip adult channels when adult mode is disabled
        if channel_parameters["adult"] == True and config.get_setting(
                "adult_mode") == False:
            continue

        # Skip channels in a filtered-out language
        if channel_language != "all" and channel_parameters[
                "language"] != channel_language:
            continue

        # Skip channels excluded from global search
        include_in_global_search = channel_parameters[
            "include_in_global_search"]
        if include_in_global_search == True:
            # Check the channel's own configuration
            include_in_global_search = config.get_setting(
                "include_in_global_search", basename_without_extension)
        if include_in_global_search == False:
            continue

        t = Thread(target=channel_search,
                   args=[
                       search_results, channel_parameters, category,
                       title_year, tecleado
                   ])
        t.setDaemon(True)
        t.start()
        number_of_channels += 1

        while threading.active_count() >= MAX_THREADS:

            delta_time = int(time.time()) - start_time
            if len(itemlist) <= 0:
                timeout = None  # no result so far; let the threads keep working until one is returned
            elif delta_time >= TIMEOUT_TOTAL:
                progreso.close()
                itemlist = sorted(itemlist, key=lambda item: item.fulltitle)
                return itemlist
            else:
                timeout = TIMEOUT_TOTAL - delta_time  # Still time to gather other results

            progreso.update(completed_channels * 100 / number_of_channels)

            try:
                itemlist.extend(search_results.get(timeout=timeout))
                completed_channels += 1
            except:
                progreso.close()
                itemlist = sorted(itemlist, key=lambda item: item.fulltitle)
                return itemlist

    while completed_channels < number_of_channels:

        delta_time = int(time.time()) - start_time
        if len(itemlist) <= 0:
            timeout = None  # no result so far; let the threads keep working until one is returned
        elif delta_time >= TIMEOUT_TOTAL:
            break  # At least a result matching the searched title has been found, lets stop the search
        else:
            timeout = TIMEOUT_TOTAL - delta_time  # Still time to gather other results

        progreso.update(completed_channels * 100 / number_of_channels)

        try:
            itemlist.extend(search_results.get(timeout=timeout))
            completed_channels += 1
        except:
            # an expired timeout raises an exception
            break

    progreso.close()

    itemlist = sorted(itemlist, key=lambda item: item.fulltitle)

    return itemlist
def thread_jobs():  # define the thread job to add
    print('Number of active threads: %s' % threading.active_count())
    print('All threads: %s' % threading.enumerate())
    print('Currently running thread: %s' % threading.current_thread())
Example 39
        # Some backoff if we fail to run
        time.sleep(1.0)

assert os.path.exists(INPUT_DIR), "Invalid input directory"

if os.path.exists(OUTPUT_DIR):
    print("Deleting old output directory")
    shutil.rmtree(OUTPUT_DIR)

print("Creating output directory")
os.mkdir(OUTPUT_DIR)

# Disable AFL affinity as we do it better
os.environ["AFL_NO_AFFINITY"] = "1"

for cpu in range(0, NUM_CPUS):
    threading.Timer(0.0, do_work, args=[cpu]).start()

    # Let master stabilize first
    if cpu == 0:
        time.sleep(1.0)

while threading.active_count() > 1:
    time.sleep(5.0)

    try:
        subprocess.check_call(["/mnt/ramdisk/soteria/s2s_transform/libtooling/build/afl-whatsup", "-s", OUTPUT_DIR])
    except:
        pass
    lock.acquire()
    # if the current thread count is below the limit, proceed
    if len(_MultiThreading.queue_t) \
            > thread_max_count:
        lock.release()
        # wait for the previous threads to finish
        _MultiThreading.event_t.wait()
    else:
        lock.release()
    # continue to create new one
    sub_thread = _MultiThreading(lock, i, sub_url_list, num2_list,
                                 thread_max_count)
    # make every download sub-thread a non-daemon thread
    sub_thread.setDaemon(False)
    sub_thread.create()
    # parent thread waits for all sub-threads to end
    while aliveThreadCnt > 1:
        # global variable update
        _alivethread_counter = threading.active_count()
        # when the alive-thread count changes, record the new value
        if aliveThreadCnt != _alivethread_counter:
            # update alive thread count
            aliveThreadCnt = _alivethread_counter

print('Gather all tbt data complete, elapsed time %.2fs' % total_elapsedtime)
print('Crawler has completed work')
print('code by </MATRIX>@Neod Anderjon(LeaderN)')

# =================================================================
# code by </MATRIX>@Neod Anderjon
Example 41
def freq():
    if request.method == 'GET':
        if threading.active_count() < 2:
            threading.Thread(target=play, args=(request.args['freq'], 0.01)).start()
    return "Zdravo"
Example 42
# @Author : Ivan-杨杨兮 ([email protected])
# @Link   : www.cgartech.com
# @Date   : 11/4/2018, 9:13:34 PM

# multithreading: locks

import threading, time

lock = threading.Lock()


def run(n):
    lock.acquire()  # .acquire() takes the lock
    print('task %s' % n)
    lock.release()  # release the lock
    time.sleep(2)


start_time = time.time()
for i in range(50):
    t = threading.Thread(target=run, args=('t-%s' % i, ))
    t.start()

# for t in t_objs:
#     t.join() # wait for child threads to finish, then resume the main thread
while threading.active_count() != 7:  # active_count() gives the current thread count; != 1 would mean child threads remain (testing shows 7 threads already exist at startup on a Mac, reason unknown)
    print(threading.active_count())
    time.sleep(.5)

print('all threads have finished. cost:', time.time() - start_time)
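
The magic number 7 in the loop above is whatever thread count the environment starts with; a minimal sketch that avoids hard-coding it by capturing a baseline before spawning:

import threading, time

def wait_for_spawned(worker, n=50):
    baseline = threading.active_count()  # threads that existed before ours
    for i in range(n):
        threading.Thread(target=worker, args=('t-%s' % i,)).start()
    # wait until we are back down to the pre-spawn count
    while threading.active_count() > baseline:
        time.sleep(.5)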
Example 43
def run(args):
    tests = []
    stdtty = tester.rt.console.save()
    opts = None
    default_exefilter = '*.exe'
    try:
        optargs = {
            '--rtems-tools': 'The path to the RTEMS tools',
            '--rtems-bsp': 'The RTEMS BSP to run the test on',
            '--user-config': 'Path to your local user configuration INI file',
            '--list-bsps': 'List the supported BSPs',
            '--debug-trace': 'Debug trace based on specific flags',
            '--stacktrace': 'Dump a stack trace on a user termination (^C)'
        }
        opts = tester.rt.options.load(args, optargs=optargs)
        log.notice('RTEMS Testing - Run, %s' % (version.string()))
        if opts.find_arg('--list-bsps'):
            tester.rt.bsps.list(opts)
        opts.log_info()
        log.output('Host: ' + host.label(mode='all'))
        debug_trace = opts.find_arg('--debug-trace')
        if debug_trace:
            if len(debug_trace) != 1:
                debug_trace = 'output,' + debug_trace[1]
            else:
                raise error.general(
                    'no debug flags, can be: console,gdb,output')
        else:
            debug_trace = 'output'
        opts.defaults['debug_trace'] = debug_trace
        rtems_tools = opts.find_arg('--rtems-tools')
        if rtems_tools:
            if len(rtems_tools) != 2:
                raise error.general('invalid RTEMS tools option')
            rtems_tools = rtems_tools[1]
        else:
            rtems_tools = '%{_prefix}'
        bsp = opts.find_arg('--rtems-bsp')
        if bsp is None or len(bsp) != 2:
            raise error.general('RTEMS BSP not provided or an invalid option')
        bsp = tester.rt.config.load(bsp[1], opts)
        bsp_config = opts.defaults.expand(opts.defaults['tester'])
        executables = find_executables(opts.params())
        if len(executables) != 1:
            raise error.general('one executable required, found %d' %
                                (len(executables)))
        opts.defaults['test_disable_header'] = '1'
        reports = tester.rt.report.report(1)
        start_time = datetime.datetime.now()
        opts.defaults['exe_trace'] = debug_trace
        tst = test(1, 1, reports, executables[0], rtems_tools, bsp, bsp_config,
                   opts)
        tst.run()
        end_time = datetime.datetime.now()
        total_time = 'Run time     : %s' % (str(end_time - start_time))
        log.notice(total_time)

    except error.general as gerr:
        print(gerr)
        sys.exit(1)
    except error.internal as ierr:
        print(ierr)
        sys.exit(1)
    except error.exit:
        sys.exit(2)
    except KeyboardInterrupt:
        if opts is not None and opts.find_arg('--stacktrace'):
            print('}} dumping:', threading.active_count())
            for t in threading.enumerate():
                print('}} ', t.name)
            print(stacktraces.trace())
        log.notice('abort: user terminated')
        sys.exit(1)
    finally:
        tester.rt.console.restore(stdtty)
    sys.exit(0)
Example 44
    traffic_intensity = 0

    def __init__(self, t_intensity):
        self.traffic_intensity = t_intensity

    def user_node_workload(self):
        return self.traffic_intensity


mqtt.Client.connected_flag = False

communicate_device = CommunicateDevice()
communicate_device.create_multi_connections()

active_thread_num = threading.active_count()
print("current threads = ", active_thread_num)
print("Creating  Connections ", quantity_user_node_clients,
      " user_node_clients")

print("Publishing ")
Run_Flag = True
try:
    while Run_Flag:
        client = user_node_clients[0]["client"]
        pub_topic = user_node_clients[0]["pub_topic"]
        if client.connected_flag:
            #print("user device "+ str(7) + " is publishing~~~~~~~~~~")
            client.publish(pub_topic, str(request_resource_capacity))
        time.sleep(1.5)
except KeyboardInterrupt:
    pass  # stop publishing on Ctrl-C
Example 45
 def exp_get_stats(self):
     res = "OpenERP server: %d threads\n" % threading.active_count()
     res += netsvc.Server.allStats()
     return res
def main():
    print(threading.active_count(), '1')
    print(threading.current_thread(), '2')
    print(threading.enumerate(), '3')
Example 47
from django.contrib.auth.signals import user_logged_in, user_logged_out
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver

from desktop.lib.metrics import global_registry

global_registry().gauge_callback(
    name='python.threads.count',
    callback=lambda: len(threading.enumerate()),
    label='Thread count',
    description='Number of threads',
)

global_registry().gauge_callback(
    name='python.threads.active',
    callback=lambda: threading.active_count(),
    label='Active thread count',
    description='Number of active threads',
)

global_registry().gauge_callback(
    name='python.threads.daemon',
    callback=lambda: sum(1 for thread in threading.enumerate()
                         if thread.isDaemon()),
    label='Daemon thread count',
    description='Number of daemon threads',
)

# ------------------------------------------------------------------------------

global_registry().gauge_callback(
Example 48
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#liuhao
import threading
import time

def sayhi(num):
    print('running on number:%s'%num)
    time.sleep(3)
    print('finished:', num)

if __name__ == '__main__':
    t1=threading.Thread(target=sayhi,args=(1,)) # create a thread instance
    t2=threading.Thread(target=sayhi,args=(2,)) # create a second thread instance
    t1.start()
    t2.start()
    print(threading.active_count()) # get the total number of running threads
    print(t1.getName()) # get the thread name
    print(t2.getName())
Example 49
#!_*_coding:utf-8_*_
#__author__:"Alex huang"

import threading, time


def run(n):
    semaphore.acquire()
    print('threading in %s' % n)
    time.sleep(1)
    semaphore.release()


if __name__ == "__main__":
    semaphore = threading.BoundedSemaphore(5)
    for i in range(25):
        t = threading.Thread(target=run, args=(i, ))
        t.start()
    while threading.active_count() != 1:
        pass
    else:
        print("All threading is done")
Example 50
File: build.py Project: piokuc/cime
def _build_model(build_threaded, exeroot, incroot, complist, lid, caseroot,
                 cimeroot, compiler, buildlist, comp_interface):
    ###############################################################################
    logs = []

    thread_bad_results = []
    for model, comp, nthrds, _, config_dir in complist:
        if buildlist is not None and model.lower() not in buildlist:
            continue

        # aquap has a dependency on atm so we will build it after the threaded loop
        if comp == "aquap":
            logger.debug("Skip aquap ocn build here")
            continue

        # coupler handled separately
        if model == "cpl":
            continue

        # special case for clm
        # clm 4_5 and newer is a shared (as in sharedlibs, shared by all tests) library
        # (but not in E3SM) and should be built in build_libraries
        if get_model() != "e3sm" and comp == "clm":
            continue
        else:
            logger.info("         - Building {} Library ".format(model))

        smp = nthrds > 1 or build_threaded

        bldroot = os.path.join(exeroot, model, "obj")
        libroot = os.path.join(exeroot, "lib")
        file_build = os.path.join(exeroot, "{}.bldlog.{}".format(model, lid))
        logger.debug("bldroot is {}".format(bldroot))
        logger.debug("libroot is {}".format(libroot))

        # make sure bldroot and libroot exist
        for build_dir in [bldroot, libroot]:
            if not os.path.exists(build_dir):
                os.makedirs(build_dir)

        # build the component library
        # thread_bad_results captures error output from thread (expected to be empty)
        # logs is a list of log files to be compressed and added to the case logs/bld directory
        t = threading.Thread(target=_build_model_thread,
                             args=(config_dir, model, comp, caseroot, libroot,
                                   bldroot, incroot, file_build,
                                   thread_bad_results, smp, compiler))
        t.start()

        logs.append(file_build)

    # Wait for threads to finish
    while (threading.active_count() > 1):
        time.sleep(1)

    expect(not thread_bad_results, "\n".join(thread_bad_results))

    #
    # Now build the executable
    #

    if not buildlist:
        cime_model = get_model()
        file_build = os.path.join(exeroot,
                                  "{}.bldlog.{}".format(cime_model, lid))

        ufs_driver = os.environ.get("UFS_DRIVER")
        if cime_model == 'ufs' and ufs_driver == 'nems':
            config_dir = os.path.join(cimeroot, os.pardir, "src", "model",
                                      "NEMS", "cime", "cime_config")
        else:
            config_dir = os.path.join(cimeroot, "src", "drivers",
                                      comp_interface, "cime_config")

        expect(os.path.exists(config_dir),
               "Config directory not found {}".format(config_dir))
        if "cpl" in complist:
            bldroot = os.path.join(exeroot, "cpl", "obj")
            if not os.path.isdir(bldroot):
                os.makedirs(bldroot)
        logger.info("Building {} from {}/buildexe with output to {} ".format(
            cime_model, config_dir, file_build))

        with open(file_build, "w") as fd:
            stat = run_cmd("{}/buildexe {} {} {} ".format(
                config_dir, caseroot, libroot, bldroot),
                           from_dir=bldroot,
                           arg_stdout=fd,
                           arg_stderr=subprocess.STDOUT)[0]

        analyze_build_log("{} exe".format(cime_model), file_build, compiler)
        expect(stat == 0,
               "BUILD FAIL: buildexe failed, cat {}".format(file_build))

        # Copy the just-built ${MODEL}.exe to ${MODEL}.exe.$LID
        safe_copy("{}/{}.exe".format(exeroot, cime_model),
                  "{}/{}.exe.{}".format(exeroot, cime_model, lid))

        logs.append(file_build)

    return logs
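
The thread_bad_results list above is the usual way to surface worker errors, since an exception raised inside a Thread would otherwise be lost; a minimal sketch of that pattern in isolation:

import threading

def run_and_collect_errors(tasks):
    errors = []  # shared list; list.append is atomic under the GIL

    def runner(task):
        try:
            task()
        except Exception as exc:
            errors.append(str(exc))

    threads = [threading.Thread(target=runner, args=(t,)) for t in tasks]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    return errors  # expected to be empty, like thread_bad_results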
Example 51
def main():
    """
    Main function of sqlmap when running from command line.
    """

    try:
        dirtyPatches()
        resolveCrossReferences()
        checkEnvironment()
        setPaths(modulePath())
        banner()

        # Store original command line options for possible later restoration
        args = cmdLineParser()
        cmdLineOptions.update(
            args.__dict__ if hasattr(args, "__dict__") else args)
        initOptions(cmdLineOptions)

        if checkPipedInput():
            conf.batch = True

        if conf.get("api"):
            # heavy imports
            from lib.utils.api import StdDbOut
            from lib.utils.api import setRestAPILog

            # Overwrite system standard output and standard error to write
            # to an IPC database
            sys.stdout = StdDbOut(conf.taskid, messagetype="stdout")
            sys.stderr = StdDbOut(conf.taskid, messagetype="stderr")
            setRestAPILog()

        conf.showTime = True
        dataToStdout("[!] legal disclaimer: %s\n\n" % LEGAL_DISCLAIMER,
                     forceOutput=True)
        dataToStdout("[*] starting @ %s\n\n" % time.strftime("%X /%Y-%m-%d/"),
                     forceOutput=True)

        init()

        if not conf.updateAll:
            # Postponed imports (faster start)
            if conf.smokeTest:
                from lib.core.testing import smokeTest
                os._exitcode = 1 - (smokeTest() or 0)
            elif conf.vulnTest:
                from lib.core.testing import vulnTest
                os._exitcode = 1 - (vulnTest() or 0)
            else:
                from lib.controller.controller import start
                if conf.profile:
                    from lib.core.profiling import profile
                    globals()["start"] = start
                    profile()
                else:
                    try:
                        if conf.crawlDepth and conf.bulkFile:
                            targets = getFileItems(conf.bulkFile)

                            for i in xrange(len(targets)):
                                target = None

                                try:
                                    kb.targets = OrderedSet()
                                    target = targets[i]

                                    if not re.search(r"(?i)\Ahttp[s]*://",
                                                     target):
                                        target = "http://%s" % target

                                    infoMsg = "starting crawler for target URL '%s' (%d/%d)" % (
                                        target, i + 1, len(targets))
                                    logger.info(infoMsg)

                                    crawl(target)
                                except Exception as ex:
                                    if target and not isinstance(
                                            ex, SqlmapUserQuitException):
                                        errMsg = "problem occurred while crawling '%s' ('%s')" % (
                                            target, getSafeExString(ex))
                                        logger.error(errMsg)
                                    else:
                                        raise
                                else:
                                    if kb.targets:
                                        start()
                        else:
                            start()
                    except Exception as ex:
                        os._exitcode = 1

                        if "can't start new thread" in getSafeExString(ex):
                            errMsg = "unable to start new threads. Please check OS (u)limits"
                            logger.critical(errMsg)
                            raise SystemExit
                        else:
                            raise

    except SqlmapUserQuitException:
        if not conf.batch:
            errMsg = "user quit"
            logger.error(errMsg)

    except (SqlmapSilentQuitException, bdb.BdbQuit):
        pass

    except SqlmapShellQuitException:
        cmdLineOptions.sqlmapShell = False

    except SqlmapBaseException as ex:
        errMsg = getSafeExString(ex)
        logger.critical(errMsg)

        os._exitcode = 1

        raise SystemExit

    except KeyboardInterrupt:
        print()

    except EOFError:
        print()

        errMsg = "exit"
        logger.error(errMsg)

    except SystemExit as ex:
        os._exitcode = ex.code or 0

    except:
        print()
        errMsg = unhandledExceptionMessage()
        excMsg = traceback.format_exc()
        valid = checkIntegrity()

        os._exitcode = 255

        if any(_ in excMsg for _ in ("MemoryError", "Cannot allocate memory")):
            errMsg = "memory exhaustion detected"
            logger.critical(errMsg)
            raise SystemExit

        elif any(_ in excMsg for _ in ("No space left", "Disk quota exceeded",
                                       "Disk full while accessing")):
            errMsg = "no space left on output device"
            logger.critical(errMsg)
            raise SystemExit

        elif any(_ in excMsg for _ in ("The paging file is too small", )):
            errMsg = "no space left for paging file"
            logger.critical(errMsg)
            raise SystemExit

        elif all(_ in excMsg
                 for _ in ("Access is denied", "subprocess", "metasploit")):
            errMsg = "permission error occurred while running Metasploit"
            logger.critical(errMsg)
            raise SystemExit

        elif all(_ in excMsg for _ in ("Permission denied", "metasploit")):
            errMsg = "permission error occurred while using Metasploit"
            logger.critical(errMsg)
            raise SystemExit

        elif "Read-only file system" in excMsg:
            errMsg = "output device is mounted as read-only"
            logger.critical(errMsg)
            raise SystemExit

        elif "Insufficient system resources" in excMsg:
            errMsg = "resource exhaustion detected"
            logger.critical(errMsg)
            raise SystemExit

        elif "OperationalError: disk I/O error" in excMsg:
            errMsg = "I/O error on output device"
            logger.critical(errMsg)
            raise SystemExit

        elif "Violation of BIDI" in excMsg:
            errMsg = "invalid URL (violation of Bidi IDNA rule - RFC 5893)"
            logger.critical(errMsg)
            raise SystemExit

        elif "Invalid IPv6 URL" in excMsg:
            errMsg = "invalid URL ('%s')" % excMsg.strip().split('\n')[-1]
            logger.critical(errMsg)
            raise SystemExit

        elif "_mkstemp_inner" in excMsg:
            errMsg = "there has been a problem while accessing temporary files"
            logger.critical(errMsg)
            raise SystemExit

        elif any(_ in excMsg for _ in ("tempfile.mkdtemp", "tempfile.mkstemp",
                                       "tempfile.py")):
            errMsg = "unable to write to the temporary directory '%s'. " % tempfile.gettempdir(
            )
            errMsg += "Please make sure that your disk is not full and "
            errMsg += "that you have sufficient write permissions to "
            errMsg += "create temporary files and/or directories"
            logger.critical(errMsg)
            raise SystemExit

        elif "Permission denied: '" in excMsg:
            match = re.search(r"Permission denied: '([^']*)", excMsg)
            errMsg = "permission error occurred while accessing file '%s'" % match.group(
                1)
            logger.critical(errMsg)
            raise SystemExit

        elif all(_ in excMsg for _ in ("twophase", "sqlalchemy")):
            errMsg = "please update the 'sqlalchemy' package (>= 1.1.11) "
            errMsg += "(Reference: 'https://qiita.com/tkprof/items/7d7b2d00df9c5f16fffe')"
            logger.critical(errMsg)
            raise SystemExit

        elif all(_ in excMsg for _ in ("scramble_caching_sha2", "TypeError")):
            errMsg = "please downgrade the 'PyMySQL' package (=< 0.8.1) "
            errMsg += "(Reference: 'https://github.com/PyMySQL/PyMySQL/issues/700')"
            logger.critical(errMsg)
            raise SystemExit

        elif "must be pinned buffer, not bytearray" in excMsg:
            errMsg = "error occurred at Python interpreter which "
            errMsg += "is fixed in 2.7. Please update accordingly "
            errMsg += "(Reference: 'https://bugs.python.org/issue8104')"
            logger.critical(errMsg)
            raise SystemExit

        elif "hash_randomization" in excMsg:
            errMsg = "error occurred at Python interpreter which "
            errMsg += "is fixed in 2.7.3. Please update accordingly "
            errMsg += "(Reference: 'https://docs.python.org/2/library/sys.html')"
            logger.critical(errMsg)
            raise SystemExit

        elif all(_ in excMsg for _ in ("Resource temporarily unavailable",
                                       "os.fork()", "dictionaryAttack")):
            errMsg = "there has been a problem while running the multiprocessing hash cracking. "
            errMsg += "Please rerun with option '--threads=1'"
            logger.critical(errMsg)
            raise SystemExit

        elif "can't start new thread" in excMsg:
            errMsg = "there has been a problem while creating new thread instance. "
            errMsg += "Please make sure that you are not running too many processes"
            if not IS_WIN:
                errMsg += " (or increase the 'ulimit -u' value)"
            logger.critical(errMsg)
            raise SystemExit

        elif "can't allocate read lock" in excMsg:
            errMsg = "there has been a problem in regular socket operation "
            errMsg += "('%s')" % excMsg.strip().split('\n')[-1]
            logger.critical(errMsg)
            raise SystemExit

        elif all(_ in excMsg for _ in ("pymysql", "configparser")):
            errMsg = "wrong initialization of pymsql detected (using Python3 dependencies)"
            logger.critical(errMsg)
            raise SystemExit

        elif all(_ in excMsg
                 for _ in ("ntlm", "socket.error, err", "SyntaxError")):
            errMsg = "wrong initialization of python-ntlm detected (using Python2 syntax)"
            logger.critical(errMsg)
            raise SystemExit

        elif all(_ in excMsg for _ in ("drda", "to_bytes")):
            errMsg = "wrong initialization of drda detected (using Python3 syntax)"
            logger.critical(errMsg)
            raise SystemExit

        elif "'WebSocket' object has no attribute 'status'" in excMsg:
            errMsg = "wrong websocket library detected"
            errMsg += " (Reference: 'https://github.com/sqlmapproject/sqlmap/issues/4572#issuecomment-775041086')"
            logger.critical(errMsg)
            raise SystemExit

        elif all(_ in excMsg for _ in ("window = tkinter.Tk()", )):
            errMsg = "there has been a problem in initialization of GUI interface "
            errMsg += "('%s')" % excMsg.strip().split('\n')[-1]
            logger.critical(errMsg)
            raise SystemExit

        elif any(_ in excMsg for _ in ("unable to access item 'liveTest'", )):
            errMsg = "detected usage of files from different versions of sqlmap"
            logger.critical(errMsg)
            raise SystemExit

        elif kb.get("dumpKeyboardInterrupt"):
            raise SystemExit

        elif any(_ in excMsg for _ in ("Broken pipe", )):
            raise SystemExit

        elif valid is False:
            errMsg = "code integrity check failed (turning off automatic issue creation). "
            errMsg += "You should retrieve the latest development version from official GitHub "
            errMsg += "repository at '%s'" % GIT_PAGE
            logger.critical(errMsg)
            print()
            dataToStdout(excMsg)
            raise SystemExit

        elif any(_ in excMsg for _ in ("tamper/", "waf/")):
            logger.critical(errMsg)
            print()
            dataToStdout(excMsg)
            raise SystemExit

        elif any(_ in excMsg
                 for _ in ("ImportError", "ModuleNotFoundError",
                           "Can't find file for module",
                           "SAXReaderNotAvailable",
                           "source code string cannot contain null bytes",
                           "No module named", "tp_name field")):
            errMsg = "invalid runtime environment ('%s')" % excMsg.split(
                "Error: ")[-1].strip()
            logger.critical(errMsg)
            raise SystemExit

        elif all(_ in excMsg
                 for _ in ("SyntaxError: Non-ASCII character", ".py on line",
                           "but no encoding declared")):
            errMsg = "invalid runtime environment ('%s')" % excMsg.split(
                "Error: ")[-1].strip()
            logger.critical(errMsg)
            raise SystemExit

        elif all(_ in excMsg for _ in ("No such file", "_'")):
            errMsg = "corrupted installation detected ('%s'). " % excMsg.strip(
            ).split('\n')[-1]
            errMsg += "You should retrieve the latest development version from official GitHub "
            errMsg += "repository at '%s'" % GIT_PAGE
            logger.critical(errMsg)
            raise SystemExit

        elif all(_ in excMsg for _ in ("No such file", "sqlmap.conf", "Test")):
            errMsg = "you are trying to run (hidden) development tests inside the production environment"
            logger.critical(errMsg)
            raise SystemExit

        elif all(_ in excMsg
                 for _ in ("HTTPNtlmAuthHandler",
                           "'str' object has no attribute 'decode'")):
            errMsg = "package 'python-ntlm' has a known compatibility issue with the "
            errMsg += "Python 3 (Reference: 'https://github.com/mullender/python-ntlm/pull/61')"
            logger.critical(errMsg)
            raise SystemExit

        elif "'DictObject' object has no attribute '" in excMsg and all(
                _ in errMsg for _ in ("(fingerprinted)", "(identified)")):
            errMsg = "there has been a problem in enumeration. "
            errMsg += "Because of a considerable chance of false-positive case "
            errMsg += "you are advised to rerun with switch '--flush-session'"
            logger.critical(errMsg)
            raise SystemExit

        elif "AttributeError: 'module' object has no attribute 'F_GETFD'" in excMsg:
            errMsg = "invalid runtime (\"%s\") " % excMsg.split(
                "Error: ")[-1].strip()
            errMsg += "(Reference: 'https://stackoverflow.com/a/38841364' & 'https://bugs.python.org/issue24944#msg249231')"
            logger.critical(errMsg)
            raise SystemExit

        elif "bad marshal data (unknown type code)" in excMsg:
            match = re.search(r"\s*(.+)\s+ValueError", excMsg)
            errMsg = "one of your .pyc files are corrupted%s" % (
                " ('%s')" % match.group(1) if match else "")
            errMsg += ". Please delete .pyc files on your system to fix the problem"
            logger.critical(errMsg)
            raise SystemExit

        for match in re.finditer(r'File "(.+?)", line', excMsg):
            file_ = match.group(1)
            try:
                file_ = os.path.relpath(file_, os.path.dirname(__file__))
            except ValueError:
                pass
            file_ = file_.replace("\\", '/')
            if "../" in file_:
                file_ = re.sub(r"(\.\./)+", '/', file_)
            else:
                file_ = file_.lstrip('/')
            file_ = re.sub(r"/{2,}", '/', file_)
            excMsg = excMsg.replace(match.group(1), file_)

        errMsg = maskSensitiveData(errMsg)
        excMsg = maskSensitiveData(excMsg)

        if conf.get("api") or not valid:
            logger.critical("%s\n%s" % (errMsg, excMsg))
        else:
            logger.critical(errMsg)
            dataToStdout("%s\n" %
                         setColor(excMsg.strip(), level=logging.CRITICAL))
            createGithubIssue(errMsg, excMsg)

    finally:
        kb.threadContinue = False

        if getDaysFromLastUpdate() > LAST_UPDATE_NAGGING_DAYS:
            warnMsg = "your sqlmap version is outdated"
            logger.warn(warnMsg)

        if conf.get("showTime"):
            dataToStdout("\n[*] ending @ %s\n\n" %
                         time.strftime("%X /%Y-%m-%d/"),
                         forceOutput=True)

        kb.threadException = True

        if kb.get("tempDir"):
            for prefix in (MKSTEMP_PREFIX.IPC, MKSTEMP_PREFIX.TESTING,
                           MKSTEMP_PREFIX.COOKIE_JAR,
                           MKSTEMP_PREFIX.BIG_ARRAY):
                for filepath in glob.glob(
                        os.path.join(kb.tempDir, "%s*" % prefix)):
                    try:
                        os.remove(filepath)
                    except OSError:
                        pass

            if not filterNone(
                    filepath
                    for filepath in glob.glob(os.path.join(kb.tempDir, '*'))
                    if not any(
                        filepath.endswith(_)
                        for _ in (".lock", ".exe", ".so",
                                  '_'))):  # ignore junk files
                try:
                    shutil.rmtree(kb.tempDir, ignore_errors=True)
                except OSError:
                    pass

        if conf.get("hashDB"):
            conf.hashDB.flush(True)
            conf.hashDB.close()  # NOTE: because of PyPy

        if conf.get("harFile"):
            try:
                with openFile(conf.harFile, "w+b") as f:
                    json.dump(conf.httpCollector.obtain(),
                              fp=f,
                              indent=4,
                              separators=(',', ': '))
            except SqlmapBaseException as ex:
                errMsg = getSafeExString(ex)
                logger.critical(errMsg)

        if conf.get("api"):
            conf.databaseCursor.disconnect()

        if conf.get("dumper"):
            conf.dumper.flush()

        # short delay for thread finalization
        _ = time.time()
        while threading.active_count() > 1 and (time.time() - _) <= THREAD_FINALIZATION_TIMEOUT:
            time.sleep(0.01)

        if cmdLineOptions.get("sqlmapShell"):
            cmdLineOptions.clear()
            conf.clear()
            kb.clear()
            conf.disableBanner = True
            main()
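
# A standalone sketch of the bounded finalization wait used in the finally
# block above: give non-main threads up to `timeout` seconds to exit, then
# move on regardless. The elapsed-time check must be `<=`; with `>` the loop
# exits immediately and no grace period is granted. The helper name and the
# 3-second default are illustrative, not from sqlmap.
import threading
import time

def wait_for_threads(timeout=3.0):
    started = time.time()
    while threading.active_count() > 1 and (time.time() - started) <= timeout:
        time.sleep(0.01)

threading.Thread(target=time.sleep, args=(1,)).start()
wait_for_threads()
print("remaining threads:", threading.active_count())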
Example n. 52
#!/usr/bin/env python3
#

import threading
import time
from threading import Thread


def work():
    time.sleep(3)
    print('sub thread:\t', threading.current_thread().name)


if __name__ == '__main__':
    t = Thread(target=work)
    t.start()

    print('current thread name:\t', threading.current_thread().name)
    print('current thread:\t', threading.current_thread())
    print('thread(s):\t', threading.enumerate())
    print('num of thread(s):\t', threading.active_count())
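
# For comparison, the multiprocessing module offers analogous introspection:
# active_children() plays the role that threading.enumerate() and
# threading.active_count() play for threads. A minimal sketch using only the
# standard library:
import multiprocessing
import time

def work():
    time.sleep(3)

if __name__ == '__main__':
    p = multiprocessing.Process(target=work)
    p.start()
    print('children:', multiprocessing.active_children())
    print('num of children:', len(multiprocessing.active_children()))
    p.join()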
Example n. 53
    def connection_manager(self):
        self.node.logger.app_log.warning("Status: Starting connection manager")
        until_purge = 0

        while not self.node.IS_STOPPING:
            # one loop every 30 sec
            try:
                # dict_keys = peer_dict.keys()
                # random.shuffle(peer_dict.items())
                if until_purge <= 0:
                    # will purge once at start, then about every half hour (60 * 30 sec)
                    self.mp.MEMPOOL.purge()
                    until_purge = 60
                until_purge -= 1

                # peer management
                if not self.node.is_regnet:
                    # regnet never tries to connect
                    self.node.peers.client_loop(self.node, this_target=worker)
                self.node.logger.app_log.warning(
                    f"Status: Threads at {threading.active_count()} / {self.node.thread_limit}"
                )
                self.node.logger.app_log.info(
                    f"Status: Syncing nodes: {self.node.syncing}")
                self.node.logger.app_log.info(
                    f"Status: Syncing nodes: {len(self.node.syncing)}/3")

                # Status display for Peers related info
                self.node.peers.status_log()
                self.mp.MEMPOOL.status()
                # last block
                if self.node.last_block_ago:
                    self.node.last_block_ago = time.time() - int(
                        self.node.last_block_timestamp)
                    self.node.logger.app_log.warning(
                        f"Status: Last block {self.node.last_block} was generated "
                        f"{'%.2f' % (self.node.last_block_ago / 60) } minutes ago"
                    )
                # status Hook
                uptime = int(time.time() - self.node.startup_time)
                status = {
                    "protocolversion": self.node.version,
                    "walletversion": self.node.app_version,
                    "testnet": self.node.is_testnet,
                    # config data
                    "blocks": self.node.last_block,
                    "timeoffset": 0,
                    "connections": self.node.peers.consensus_size,
                    "difficulty":
                    self.node.difficulty[0],  # live status, bitcoind format
                    "threads": threading.active_count(),
                    "uptime": uptime,
                    "consensus": self.node.peers.consensus,
                    "consensus_percent": self.node.peers.consensus_percentage,
                    "last_block_ago": self.node.last_block_ago
                }  # extra data
                if self.node.is_regnet:
                    status['regnet'] = True
                self.node.plugin_manager.execute_action_hook('status', status)
                # end status hook

                # logger.app_log.info(threading.enumerate() all threads)
                # time.sleep(30)
                for _ in range(30):
                    # check once per second for a faster stop
                    if self.node.IS_STOPPING:
                        break
                    time.sleep(1)
            except Exception as e:
                self.node.logger.app_log.warning(
                    f"Error in connection manger ({e})")
Example n. 54
def measure_method(mag_dict, keith_dict, control_dict, lockin_dict):

    display = control_dict['Display']

    # target of threading, allows for smooth running
    def measure_loop():
        global scan_field_output, measured_values, sens_lbl

        measured_values = []
        pos_values = []
        neg_values = []
        sens_lbl = [0]

        # create the lists of field values, scan loop is modified to include full loop
        if control_dict['Field Step'].get() == 'Step':
            # builds list from step and max value
            scan_field_output = make_list(mag_dict['Hx Field (Oe)'].get(),
                                          mag_dict['Hx Step (Oe)'].get())
            # take inverse list and add it on, creating the full list values to measure at
            inverse = reversed(scan_field_output[0:-1])
            scan_field_output += inverse
        else:
            # takes string and converts to list
            scan_field_output = convert_to_list(
                mag_dict['Hx Field (Oe)'].get())
            # take inverse list and add it on, creating the full list values to measure at
            inverse = reversed(scan_field_output[0:-1])
            scan_field_output += inverse

        # create the list of current values
        if control_dict['I_app Step'].get() == 'Step':
            # sensing current list
            sense_output = make_list(
                keith_dict['Sensing Current (mA)'].get(),
                keith_dict['Sensing Current Step (mA)'].get())
        else:
            # sensing current list
            sense_output = convert_to_list(
                keith_dict['Sensing Current (mA)'].get())

        # ensures output voltages will not exceed amp thresholds
        if max(scan_field_output) / float(
                control_dict['Hx/DAC (Oe/V)']) < float(
                    control_dict['Hx DAC Limit']):

            # initialize machines
            amp = lockinAmp(lockin_dict['Mode'], lockin_dict['Sensitivity'],
                            lockin_dict['Signal Voltage'],
                            lockin_dict['Frequency'])
            keith_2400 = Keithley2400('f')  #Initiate K2400
            keith_2000 = Keithley('f')  #Initiate K2000
            # fixed sensing current value
            for sense_val in sense_output:

                sens_lbl[0] = round(sense_val, 3)

                # setup K2400 here
                keith_2400.fourWireOff()
                keith_2400.setCurrent(round(sense_val, 4))
                keith_2400.outputOn()
                # take initial resistance measurement?
                index = 1
                data = []

                while index <= 5:  #Average of five measurements
                    data = data + keith_2400.measureOnce()
                    index += 1
                resistance = round(data[1] / data[2], 4)
                display.insert('end',
                               "Measured current: %f mA" % (1000 * data[2]))
                display.insert('end', "Measured voltage: %f V" % data[1])
                display.insert('end',
                               "Measured resistance: %f Ohm" % (resistance))
                display.see(END)

                # initializes the measurement data list
                measured_values = []

                display.insert(
                    'end', 'Measurement using %s (mA) sensing current' %
                    str(sense_val))
                display.see(END)

                # measurement loops - measure pos and neg current at a given scan value and take avg abs val (ohms)
                for counter, scan_val in enumerate(scan_field_output):

                    if counter == 0:
                        diff = abs(scan_val)
                    else:
                        diff = abs(scan_val - scan_field_output[counter - 1])
                    # function to be built to model the time necessary for the magnets to get to value
                    amp.dacOutput(
                        (scan_val / float(control_dict['Hx/DAC (Oe/V)'])),
                        control_dict['Hx DAC Channel'])
                    time.sleep(charging(diff))
                    keith_2400.outputOn()
                    keith_2400.setCurrent(round(sense_val, 4))
                    time.sleep(float(keith_dict['Delay (s)'].get()))  # delay before measuring
                    pos_data = keith_2000.measureMulti(
                        int(keith_dict['Averages'].get()))
                    keith_2400.setCurrent(round(-sense_val, 4))
                    time.sleep(float(keith_dict['Delay (s)'].get()))  # delay before measuring
                    neg_data = keith_2000.measureMulti(
                        int(keith_dict['Averages'].get()))
                    tmp = round(
                        float((abs(pos_data) - abs(neg_data)) * 1000 /
                              sense_val),
                        4)  # voltage from K2000 / sense current
                    pos_values.append(abs(pos_data) * 1000 / sense_val)
                    neg_values.append(abs(neg_data) * 1000 / sense_val)
                    measured_values.append(tmp)
                    display.insert(
                        'end',
                        'Applied Hx Field Value: %s (Oe)      Measured Avg Resistance: %s (Ohm)'
                        % (scan_val, tmp))
                    display.see(END)

                # save data
                save_method(sense_val, scan_field_output, measured_values,
                            display, control_dict['Directory'],
                            control_dict['File Name'].get(), resistance,
                            pos_values, neg_values)

            # turn everything off at end of loop
            amp.dacOutput(0, control_dict['Hx DAC Channel'])
            keith_2400.minimize()

            display.insert('end', "Measurement finished")
            display.see(END)
        else:
            messagebox.showwarning(
                'Output Too Large',
                'Output value beyond amp voltage threshold')
            display.insert('end', 'Output value too large!')
            display.see(END)

        #----------------------------END measure_loop----------------------------------#

    # Only one thread allowed. This is a cheap and easy workaround so we don't have to stop threads
    if threading.active_count() == 1:
        # thread is set to Daemon so if mainthread is quit, it dies
        t = threading.Thread(target=measure_loop,
                             name='measure_thread',
                             daemon=True)
        t.start()
    else:
        messagebox.showerror('Error', 'Multiple threads detected!')
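
# The threading.active_count() == 1 guard above breaks as soon as any
# unrelated daemon thread is alive. A sketch of a sturdier single-flight
# guard using a module-level lock; the names (_measure_lock,
# start_measurement) are illustrative, not from the original script:
import threading

_measure_lock = threading.Lock()

def start_measurement(target):
    if not _measure_lock.acquire(blocking=False):
        return False  # a measurement thread is already running

    def runner():
        try:
            target()
        finally:
            _measure_lock.release()  # always free the slot, even on error

    threading.Thread(target=runner, name='measure_thread', daemon=True).start()
    return True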
Example n. 55
    def thread_count(self):
        r"""int: The number of active threads."""
        return threading.active_count()
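
# A minimal context for the helper above, assuming (from the attribute-style
# docstring) that it is exposed as a read-only property; the Diagnostics
# class name is hypothetical:
import threading

class Diagnostics(object):
    @property
    def thread_count(self):
        r"""int: The number of active threads."""
        return threading.active_count()

print(Diagnostics().thread_count)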
Example n. 56
        while threading.active_count() > 1 and (time.time() - _) <= THREAD_FINALIZATION_TIMEOUT:
            time.sleep(0.01)

        if cmdLineOptions.get("sqlmapShell"):
            cmdLineOptions.clear()
            conf.clear()
            kb.clear()
            conf.disableBanner = True
            main()


if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        pass
    except SystemExit:
        raise
    except:
        traceback.print_exc()
    finally:
        # Reference: http://stackoverflow.com/questions/1635080/terminate-a-multi-thread-python-program
        if threading.active_count() > 1:
            os._exit(getattr(os, "_exitcode", 0))
        else:
            sys.exit(getattr(os, "_exitcode", 0))
else:
    # cancelling postponed imports (because of Travis CI checks)
    __import__("lib.controller.controller")
Example n. 57
    def client_loop(self, node, this_target):
        """Manager loop called every 30 sec. Handles maintenance"""
        try:
            for key, value in dict(self.dict_shuffle(self.peer_dict)).items():
                # The dict() above is not an error or a cast,
                # it's to make a copy of the dict and avoid "dictionary changed size during iteration"
                host = key
                port = int(value)

                if self.is_testnet:
                    port = 2829
                if (threading.active_count() / 3 < self.config.thread_limit
                        and self.can_connect_to(host, port)):
                    self.app_log.info(
                        f"Will attempt to connect to {host}:{port}")
                    self.add_try(host, port)
                    t = threading.Thread(
                        target=this_target,
                        args=(host, port, node),
                        name=f"out_{host}_{port}"
                    )  # threaded connectivity to nodes here
                    self.app_log.info(
                        f"---Starting a client thread {threading.currentThread()} ---"
                    )
                    t.daemon = True
                    t.start()

            if len(self.peer_dict) < 6 and int(time() -
                                               self.startup_time) > 30:
                # join in random peers after x seconds
                self.app_log.warning(
                    "Not enough peers in consensus, joining in peers suggested by other nodes"
                )
                self.peer_dict.update(self.peers_get(self.suggested_peerfile))

            if len(self.connection_pool) < self.config.nodes_ban_reset and int(
                    time() - self.startup_time) > 15:
                # do not reset before 15 secs have passed
                self.app_log.warning(
                    f"Only {len(self.connection_pool)} connections active, resetting banlist"
                )
                del self.banlist[:]
                self.banlist.extend(
                    self.config.banlist)  # reset to config version
                del self.warning_list[:]

            if len(self.connection_pool) < 10:
                self.app_log.warning(
                    f"Only {len(self.connection_pool)} connections active, "
                    f"resetting the connection history")
                # TODO: only reset large timeouts, or we end up trying the same ones over and over if we never get to 10.
                # self.
                self.reset_tried()

            if self.config.nodes_ban_reset <= len(self.banlist) and len(self.connection_pool) <= len(self.banlist) \
                    and (time() - self.reset_time) > 60 * 10:
                # do not reset too often. 10 minutes here
                self.app_log.warning(
                    f"Less active connections ({len(self.connection_pool)}) "
                    f"than banlist ({len(self.banlist)}), resetting banlist and tried list"
                )
                del self.banlist[:]
                self.banlist.extend(
                    self.config.banlist)  # reset to config version
                del self.warning_list[:]
                self.reset_tried()
                self.reset_time = time()

            self.app_log.warning("Status: Testing peers")
            self.peer_dict.update(self.peers_get(self.peerfile))
            # self.peer_dict.update(self.peers_get(self.suggested_peerfile))

            # TODO: this is not OK. client_loop is called every 30 sec and should NOT contain any lengthy calls.
            self.peers_test(self.suggested_peerfile,
                            self.peer_dict,
                            strict=False)
            self.peers_test(self.peerfile, self.peer_dict, strict=True)

        except Exception as e:
            self.app_log.warning(
                f"Status: peers client loop skipped due to error: {e}")
            # raise
            """We do not want to raise here, since the rest of the calling method would be skipped also.
Example n. 58
    def testTimeBasedCacheSingleThread(self):

        utils.TimeBasedCache()
        num_threads = threading.active_count()
        utils.TimeBasedCache()
        self.assertEqual(threading.active_count(), num_threads)
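
# The test above asserts that constructing a second cache does not spawn
# another background thread. The same thread-count invariance check, written
# as a reusable helper (a sketch; the helper name is mine, and
# utils.TimeBasedCache is the code under test above):
import contextlib
import threading

@contextlib.contextmanager
def thread_count_unchanged():
    before = threading.active_count()
    yield
    assert threading.active_count() == before, "unexpected new thread(s)"

# hypothetical usage, wrapping the second construction:
#     with thread_count_unchanged():
#         utils.TimeBasedCache()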
Example n. 59
def _keep_alive_thread():
    host_event = False
    last_update = None
    proc_stat = None
    settings.local.host_ping_timestamp = utils.now()

    cur_public_ip = None
    cur_public_ip6 = None
    cur_host_name = settings.local.host.name
    cur_route53_region = settings.app.route53_region
    cur_route53_zone = settings.app.route53_zone
    auto_public_host = settings.local.host.auto_public_host
    auto_public_host6 = settings.local.host.auto_public_host6

    while True:
        try:
            if settings.local.host.id != settings.local.host_id:
                logger.error(
                    'Host ID mismatch',
                    'runners',
                    host=settings.local.host.id,
                    host_id=settings.local.host_id,
                )

            timestamp = utils.now()
            timestamp -= datetime.timedelta(
                microseconds=timestamp.microsecond,
                seconds=timestamp.second,
            )

            if timestamp != last_update:
                last_update = timestamp

                last_proc_stat = proc_stat
                proc_stat = host.usage_utils.get_proc_stat()

                if last_proc_stat and proc_stat:
                    cpu_usage = host.usage_utils.calc_cpu_usage(
                        last_proc_stat, proc_stat)
                    mem_usage = host.usage_utils.get_mem_usage()
                    settings.local.host.usage.add_period(
                        timestamp, cpu_usage, mem_usage)

            yield interrupter_sleep(settings.app.host_ping)

            ping_timestamp = utils.now()

            try:
                open_file_count = len(os.listdir('/proc/self/fd'))
            except:
                open_file_count = 0

            cpu_usage = None
            mem_usage = None
            thread_count = threading.active_count()
            server_count = len(host.global_servers)
            device_count = host.global_clients.count({})
            try:
                cpu_usage, mem_usage = utils.get_process_cpu_mem()
            except:
                logger.exception(
                    'Failed to get process cpu and mem usage',
                    'runners',
                    host_id=settings.local.host_id,
                    host_name=settings.local.host.name,
                )

            host_name = settings.local.host.name
            route53_region = settings.app.route53_region
            route53_zone = settings.app.route53_zone
            if route53_region and route53_zone:
                if cur_public_ip != settings.local.public_ip or \
                        cur_public_ip6 != settings.local.public_ip6 or \
                        cur_host_name != host_name or \
                        cur_route53_region != route53_region or \
                        cur_route53_zone != route53_zone:
                    cur_host_name = host_name
                    cur_public_ip = settings.local.public_ip
                    cur_public_ip6 = settings.local.public_ip6
                    cur_route53_region = route53_region
                    cur_route53_zone = route53_zone

                    auto_public_host, auto_public_host6 = \
                        utils.set_zone_record(
                            route53_region,
                            route53_zone,
                            host_name,
                            cur_public_ip,
                            cur_public_ip6,
                        )

                    settings.local.host.auto_public_host = auto_public_host
                    settings.local.host.auto_public_host6 = auto_public_host6

                    host_event = True
            else:
                auto_public_host = None
                auto_public_host6 = None

            if settings.local.host.auto_public_address != \
                settings.local.public_ip or \
                    settings.local.host.auto_public_address6 != \
                    settings.local.public_ip6:
                settings.local.host.auto_public_address = \
                    settings.local.public_ip
                settings.local.host.auto_public_address6 = \
                    settings.local.public_ip6
                host_event = True

            settings.local.host.collection.update(
                {
                    '_id': settings.local.host_id,
                }, {
                    '$set': {
                        'version': settings.local.version,
                        'server_count': server_count,
                        'device_count': device_count,
                        'cpu_usage': cpu_usage,
                        'mem_usage': mem_usage,
                        'thread_count': thread_count,
                        'open_file_count': open_file_count,
                        'status': ONLINE,
                        'ping_timestamp': utils.now(),
                        'auto_public_address': settings.local.public_ip,
                        'auto_public_address6': settings.local.public_ip6,
                        'auto_public_host': auto_public_host,
                        'auto_public_host6': auto_public_host6,
                    }
                })

            if host_event:
                host_event = False
                event.Event(type=HOSTS_UPDATED)

            monitoring.insert_point('system', {
                'host': settings.local.host.name,
            }, {
                'cpu_usage': cpu_usage,
                'mem_usage': mem_usage,
                'thread_count': thread_count,
                'open_file_count': open_file_count,
            })

            settings.local.host_ping_timestamp = ping_timestamp
        except GeneratorExit:
            host.deinit()
            raise
        except:
            logger.exception(
                'Error in host keep alive update',
                'runners',
                host_id=settings.local.host_id,
                host_name=settings.local.host.name,
            )
            time.sleep(0.5)
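
# A condensed sketch of the per-pass status snapshot gathered by the
# keep-alive loop above (field names follow the original; the /proc/self/fd
# read is Linux-only, hence the fallback; status_snapshot is my name):
import os
import threading

def status_snapshot():
    try:
        open_file_count = len(os.listdir('/proc/self/fd'))
    except OSError:
        open_file_count = 0
    return {
        'thread_count': threading.active_count(),
        'open_file_count': open_file_count,
    }

print(status_snapshot())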
elif intLevel in range(2, 4):
    regex1 = r"src=[\'\"]http\://[A-z0-9_\-\./]+|src=[\'\"]\/[A-z0-9_\-\./]+|src=[\'\"]www[A-z0-9_\-\./]+"
    regex2 = r"href=[\'\"]http\://[A-z0-9_\-\./]+|href=[\'\"]\/[A-z0-9_\-\./]+|href=[\'\"]www[A-z0-9_\-\./]+"

    results = []

    match = re.findall(re.compile(regex2), str(strContent))
    matchsrc = re.findall(re.compile(regex1), str(strContent))
    match = match + matchsrc

    q = Queue()
    threads = []

    i = 0
    while i < len(match):
        if threading.active_count() < 10:
            t = threading.Thread(target=findkeywordlvl,
                                 args=(strWebsite, match[i], q))
            t.start()
            threads.append(t)
            i += 1
        else:
            # avoid a hot spin while the 10-thread cap is reached
            time.sleep(0.1)

    for p in threads:
        p.join()
    while not q.empty():
        results.append(q.get_nowait())

    print(results)

    threads = []
    j = 0