Example #1
def run(self):
    socket_server = self.server_socket
    socket_server.listen()
    task_pool = ThreadPool()
    while True:
        client_socket, _ = socket_server.accept()
        task_pool.add_task((client_socket, self.job))
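All the examples on this page assume roughly the same ThreadPool contract: add_task() enqueues a callable (most variants take the function plus its arguments, though Example #1's variant apparently accepts a single task tuple), wait_completion() blocks until the queue drains, and some variants add destroy() to join the workers. A minimal sketch of such a pool, assuming the classic Queue-based recipe; the concrete implementations behind these examples may differ in details:

import threading
from queue import Queue  # Python 2: from Queue import Queue

class ThreadPool:
    """Pool of daemon worker threads consuming tasks from a shared queue."""

    def __init__(self, num_threads):
        self.tasks = Queue()
        for _ in range(num_threads):
            t = threading.Thread(target=self._worker)
            t.daemon = True  # workers die with the main thread
            t.start()

    def _worker(self):
        while True:
            func, args, kwargs = self.tasks.get()
            try:
                func(*args, **kwargs)
            except Exception as e:
                print(e)  # a failing task must not kill its worker
            finally:
                self.tasks.task_done()

    def add_task(self, func, *args, **kwargs):
        """Queue a callable together with its arguments."""
        self.tasks.put((func, args, kwargs))

    def wait_completion(self):
        """Block until every queued task has been processed."""
        self.tasks.join()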
Example #2
def from_file(m163, option):
    """ download objects (songs, albums...) from an input file.  """

    urls = []
    with open(option.inFile) as f:
        urls = f.readlines() 

    global total, done, xiami_obj
    total = len(urls)
    print border
    LOG.info(msgTxt.fmt_links_in_file % total)
    print border
    pool = ThreadPool(config.THREAD_POOL_SIZE)
    for link in [u for u in urls if u]:
        link = link.rstrip('\n')
        # if it is a xiami link, initialize the xiami object
        if re.match(pat_xm, link):
            __init_xiami_obj(option)
            pool.add_task(from_url_xm, xiami_obj, link, verbose=False)
        elif re.match(pat_163, link):
            pool.add_task(from_url_163, m163, link, verbose=False)
        else:
            LOG.warning(msgTxt.fmt_skip_unknown_url % link)

    pool.wait_completion()
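Note how add_task() forwards both positional and keyword arguments, so the call above runs from_url_xm(xiami_obj, link, verbose=False) on a worker thread. A hypothetical illustration of the same forwarding, using the sketch after Example #1:

def greet(name, shout=False):
    msg = "hello %s" % name
    print(msg.upper() if shout else msg)

pool = ThreadPool(4)
pool.add_task(greet, "world", shout=True)  # worker runs greet("world", shout=True)
pool.wait_completion()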
Example #3
def pickle_all_companies():
    tpool = ThreadPool(50)
    companies = Company.objects.all()
    for c in companies:
        tpool.add_task(pickle_company, c.symbol)
    tpool.wait_completion()

    return None
Example #4
def main(latest_block: int, url: str, threads: int):
    # Create a folder to store the invalids
    folder = f"./invalids/{datetime.datetime.now().strftime('%Y%m%d_%H%M%S')}"
    os.mkdir(folder)

    iterations = latest_block + 1
    for iteration in tqdm(range(1, iterations, threads)):
        pool = ThreadPool(threads)
        for block_number in range(iteration, min(iteration + threads, iterations)):  # don't overshoot the latest block
            pool.add_task(worker, url, block_number, folder)
        pool.wait_completion()
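Constructing a fresh ThreadPool on every chunk pays the thread start-up cost once per iteration. With a Queue-based pool like the sketch after Example #1, the workers survive wait_completion(), so one pool can serve every batch while wait_completion() still acts as the per-chunk barrier. A sketch, reusing worker, url and folder from the example:

pool = ThreadPool(threads)  # start the workers once
for iteration in tqdm(range(1, iterations, threads)):
    for block_number in range(iteration, min(iteration + threads, iterations)):
        pool.add_task(worker, url, block_number, folder)
    pool.wait_completion()  # barrier: finish this chunk before queueing the next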
Example #5
def main():
    start_time = time.time()
    parser = add_parser()
    args = parser.parse_args()
    app_dir = args.path + "\\" + args.app_dir
    global encoding
    encoding = args.encoding
    #line feed
    line_feed = "\n"
    # use console to show information
    global console
    console = Console()

    console.show("Target Path:      " + args.path)
    console.show("Webapp Directory: " + app_dir)
    console.show("Testing Website:  " + args.website)
    console.show("Output File:      " + args.output)

    # start fetching
    console.show("Start fetching url and its parameters in " + args.path)

    global url_data
    url_data = UrlData()
    get_url_list(args.path, app_dir, args.website)
    url_amount = url_data.len()

    #fetch complete
    console.show("Fetched " + str(url_amount) + " url(s).")
    if args.get_status != 1 or args.website == "":
        url_data.export(args.output)
        #exit
        sys.exit()

    console.show("Start testing url status with " \
            + str(args.thread_num) + " thread(s).")
    #init thread pool
    pool = ThreadPool(args.thread_num)
    
    for url in url_data.get_urls():
        pool.add_task(url_request, url)
        console.show_progress(pool.get_progress(), url_amount)
    
    while pool.get_progress() != url_amount:
        console.show_progress(pool.get_progress(), url_amount)
        time.sleep(0.1)  # avoid burning a CPU core while waiting
    

    #pool.destroy()
    finish_time = time.time()
    elapsed_time = int(finish_time - start_time)
    #export
    url_data.export(args.output)
    console.show("Task finished in " + str(elapsed_time) + " seconds.")
Example #6
    def prime_cache(self):
        """Ensures that the webpage cache is filled in the
        quickest time possible by making many requests in
        parallel"""

        print "Getting data for parts from suppliers' websites"
        pool = ThreadPool(NUM_THREADS)

        for srcode, pg in self.iteritems():
            print srcode
            pool.add_task(pg.get_price)

        pool.wait_completion()
Example #8
def download_all(links, save_folder):
    if not os.path.exists(save_folder):
        os.makedirs(save_folder)
    if USING_THREAD:
        pool = ThreadPool(NUMBER_OF_THREADS)
        for link in links:
            with _thread_print_lock:
                logger.debug("download from link: \n  %s \n" % link)
            pool.add_task(download_file, link, save_folder)
            #pool.add_task(test_func)
            time.sleep(5)
        pool.wait_completion()
    else:
        for link in links:
            logger.debug("download from link: \n  %s \n" % link)
            download_file(link, save_folder)
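The _thread_print_lock above serializes output from concurrent workers; the logging module is already thread-safe, but bare print() calls from several threads can interleave mid-line. A minimal sketch of the same idea:

import threading

_print_lock = threading.Lock()

def safe_print(*args):
    """Serialize writes so lines from different workers don't interleave."""
    with _print_lock:
        print(*args)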
Example #9
def job():
    """
    Updater job for periodic repetitions.
    """
    print('[+] PUT UPDATER INTO THREADPOOL AT [%s]' % (now()))
    pool = ThreadPool(POOL_COUNT)
    pool.add_task(populate, None)
    pool.wait_completion()
    print('[+] COMPLETE POPULATE AT [%s]' % (now()))
    pool.add_task(update, None)
    pool.wait_completion()
    print('[+] COMPLETE UPDATE AT [%s]' % (now()))
    del pool
    cache.rpush(
        'incomingQueue',
        'StartUpdateVulnerabilityDataBase')
Example #10
def from_file(xm_obj, infile):
    """ download objects (songs, albums...) from an input file.  """

    urls = []
    with open(infile) as f:
        urls = f.readlines() 

    global total, done
    total = len(urls)
    print border
    LOG.info(u' Total number of links in file: %d' % total)
    print border
    pool = ThreadPool(config.THREAD_POOL_SIZE)
    for link in [u for u in urls if u]:
        pool.add_task(from_url, xm_obj, link.rstrip('\n'), verbose=False)

    pool.wait_completion()
Example #12
class Spider(object):

    def __init__(self,seed,depth,pool_size=10):
        
        self.seed = seed
        self.depth = depth
        self.all_url_list = [seed]
        self.finished_url_list = []
        self.failure_url_list = []
        self.pool = ThreadPool(pool_size)

    def crawl(self):
        current_depth = 0
        while current_depth <= self.depth:
            # snapshot the list: download() appends newly found links to it
            for url in list(self.all_url_list):
                if url not in self.finished_url_list:
                    self.pool.add_task(self.download, url)
            self.pool.wait_completion()  # barrier: finish this level before going deeper
            current_depth += 1

    def download(self, url):
        try:
            data = urllib2.urlopen(url)
            page = data.read()
            self.finished_url_list.append(url)
            links = self.get_urls(page)
            # feed newly discovered links back into the crawl frontier
            for link in links:
                if link not in self.all_url_list:
                    self.all_url_list.append(link)
            return page, links
        except Exception as e:
            self.failure_url_list.append(url)
            print 'open url:%s raise exception(%s)' % (url, e)
            return None

    def get_urls(self,page):
        soup = BeautifulSoup(page,fromEncoding="gb18030")
        if soup.title:
            print soup.title.string
        links = []
        for item in soup.findAll('a'):
            link=item.get('href')
            if link and link.startswith('http://') and link not in self.finished_url_list:
                links.append(link)
        print links
        return links

    def get_next_url(self):
        pass
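With the crawl/download fixes above, a run looks like this (the seed URL is a placeholder):

spider = Spider('http://example.com/', depth=2, pool_size=10)
spider.crawl()
print("%d pages fetched, %d failures" % (len(spider.finished_url_list),
                                         len(spider.failure_url_list)))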
Example #13
def run():

    logging.info('start subscribe server.....')

    ## create the thread pool that fetches pages
    grab_pool = ThreadPool(GRAB_NUM)
    for i in range(GRAB_NUM):
        grab_pool.add_task(do_grab, None, id=i+1)

    ## create the thread pool that parses pages
    paser_pool = ThreadPool(PASER_NUM)
    for i in range(PASER_NUM):
        paser_pool.add_task(do_paser, None, id=i+1)

    ## create the thread pool that sends mail
    send_pool = ThreadPool(MAIL_SENDER_NUM)
    for i in range(MAIL_SENDER_NUM):
        send_pool.add_task(do_send, None, id=i+1)

    # Join and destroy all threads
    grab_pool.destroy()
    paser_pool.destroy()
    send_pool.destroy()
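The three pools form a fetch/parse/send pipeline, but the do_grab/do_paser/do_send workers themselves are not shown in the source. Presumably they hand work to each other through shared queues; a guess at that hand-off pattern, simplified to one worker per stage so a single poison pill shuts each stage down:

from queue import Queue

page_q = Queue()
mail_q = Queue()

def do_grab(_, id=0):
    # fetch stage: push page URLs to the parser stage
    for url in ["http://example.com/a", "http://example.com/b"]:
        page_q.put(url)
    page_q.put(None)  # poison pill: no more input

def do_paser(_, id=0):
    # parse stage: turn pages into outgoing mail items
    while True:
        url = page_q.get()
        if url is None:
            mail_q.put(None)  # forward the shutdown signal
            break
        mail_q.put("digest of %s" % url)

def do_send(_, id=0):
    # send stage: deliver whatever the parser produced
    while True:
        item = mail_q.get()
        if item is None:
            break
        print("sending: %s" % item)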
Example #14
def from_file(xm_obj, m163, infile):
    """ download objects (songs, albums...) from an input file.  """

    urls = []
    with open(infile) as f:
        urls = f.readlines() 

    global total, done
    total = len(urls)
    print border
    LOG.info(u' Total number of links in file: %d' % total)
    print border
    pool = ThreadPool(config.THREAD_POOL_SIZE)
    for link in [u for u in urls if u]:
        link = link.rstrip('\n')
        if re.match(pat_xm, link):
            pool.add_task(from_url_xm, xm_obj, link, verbose=False)
        elif re.match(pat_163, link):
            pool.add_task(from_url_163, m163, link, verbose=False)
        else:
            LOG.warning(u' Skipping unrecognized url [%s].' % link)

    pool.wait_completion()
Example #15
def make_all_pairs(use_celery=False, skip_update=False, skip_pickle=False, skip_worker_update=False):
    """
    This will check all of the pairs, either threaded
    or via celery (i.e. local v cloud)
    """
    logger.info(colored('Collecting companies', 'white', attrs=['bold']))
    companies = Company.objects.all()
    tpool = ThreadPool(50)

    if not skip_update:
        logger.info(colored('Updating prices', 'white', attrs=['bold']))
        for c in companies:
            tpool.add_task(c.update_prices)
        tpool.wait_completion()
        logger.info(colored('Prices updated', 'white', attrs=['bold']))

    symbols = [c.symbol for c in companies]

    if not skip_pickle:
        logger.info(colored('Pickling companies', 'white', attrs=['bold']))
        pickle_all_companies()

    if not skip_worker_update:
        logger.info(colored('Updating workers', 'white', attrs=['bold']))
        update_workers()

    if use_celery:
        for s1, s2 in itertools.combinations(symbols, 2):
            make_pair.delay(s1, s2)

    else:

        for s1, s2 in itertools.combinations(symbols, 2):
            tpool.add_task(make_pair, s1, s2)
        tpool.wait_completion()

    return
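Since make_pair is queued for every 2-combination of symbols, the task count grows as n(n-1)/2 in the number of companies; 1,000 symbols already mean 499,500 pairs. For example:

import itertools

symbols = ["AAPL", "MSFT", "GOOG"]
print(list(itertools.combinations(symbols, 2)))
# [('AAPL', 'MSFT'), ('AAPL', 'GOOG'), ('MSFT', 'GOOG')]  -> C(3, 2) = 3 pairs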
Example #16
    def run(self, symnames, init_symbols=None, N=4):
        if isinstance(symnames, (str, unicode)):
            symnames = [ symnames ]
        
        self.finalize()
        toporder = self._topsort_subgraph(symnames)
        sources  = ( v for v in toporder if len(self.depends[v]) == 0 )
        remain_to_submit = SynchronizedCounter(len(toporder))
        finished_deps = defaultdict(SynchronizedCounter)
        p = ThreadPool(N)

        if init_symbols is None:
            syms = SymbolTable()
        else:
            syms = init_symbols

        parentlock = RLock()

        done_submitting = Condition()
        # If the child thread notifies before the parent thread reaches the
        # wait statement, then the parent will never receive the notification
        # and will block forever. To fix this, the child will decrement this
        # counter to zero, and the parent will check this before waiting.
        done_submitting_helper = SynchronizedCounter(1)
        # The callback runs within the thread. Don't know how to fix.
        def make_apply_callback(gf):
            def finished(new_syms):
                parentlock.acquire()
                self.results[gf] = new_syms
                parentlock.release()

                parentlock.acquire()
#                print "%s finished! printing state"%(gf.name)
#                print "finished_deps", finished_deps
#                print >> sys.stderr, "%s completed. new_syms = %s"%(gf.name, new_syms)
#                print "self.depends", self.depends
                parentlock.release()
                # Update the functions which we precede
                for next_gf in self.preceded_by[gf]:
                    finished_deps_next_gf = finished_deps[next_gf].inc()

                    if finished_deps_next_gf == len(self.depends[next_gf]):
                        # All dependencies satisfied; we can run!
                        # This may take a bit of time, but we want to do
                        # all data manipulation in this process.
                        print >> sys.stderr, "Dependencies for %s satisfied. Queueing."%next_gf.name
                        symtable = SymbolTable(parents=[self.results[r] for r in self.depends[next_gf]])

                        # Queue doesn't need to be locked
                        p.add_task(next_gf, args=(symtable,), callback=make_apply_callback(next_gf))
                        if remain_to_submit.dec() == 0:
                            print >> sys.stderr, "All jobs have been submitted. Waiting for parent thread to be ready to receive done_submitting"
                            done_submitting.acquire()
                            done_submitting.notify()
                            done_submitting.release()
                            done_submitting_helper.dec()
            return finished                            

        for s in sources:
            remain_to_submit.dec()
            p.add_task(s, args=(SymbolTable(),), callback=make_apply_callback(s))

        if done_submitting_helper.get() > 0:
            done_submitting.acquire()
            print >> sys.stderr, "PARENT THREAD: Awaiting condition variable"
            done_submitting.wait()
            done_submitting.release()
        print >> sys.stderr, "PARENT THREAD: Joining the thread pool"
        p.wait_completion()

        ret = dict((sym, self.results[self.supplier[sym]][sym]) for sym in symnames)
        return ret
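The done_submitting_helper counter guards against the classic lost-wakeup: if the child thread calls notify() before the parent reaches wait(), the parent would block forever. The idiomatic cure is to re-check a shared predicate while holding the condition's lock; a compact sketch of the same handshake:

import threading

done = False
cond = threading.Condition()

def child():
    global done
    with cond:
        done = True    # set the predicate first...
        cond.notify()  # ...then wake the parent (harmless if it isn't waiting yet)

threading.Thread(target=child).start()

with cond:
    while not done:    # re-checking the predicate makes the notify un-losable
        cond.wait()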
Example #17
class Database:
    """Asynchronous database interface.

    The `driver' argument specifies which database to use. Possible
    values are:

    MySQLdb - for MySQL
    psycopg2 - for Postgres
    """
    def __init__(self,
                 driver=None,
                 database=None,
                 user=None,
                 password=None,
                 host='localhost',
                 ioloop=tornado.ioloop.IOLoop.instance(),
                 num_threads=10,
                 tx_connection_pool_size=5,
                 queue_timeout=1):
        if not driver:
            raise ValueError("Missing 'driver' argument")
        self._driver = driver
        self._database = database
        self._user = user
        self._password = password
        self._host = host
        self._threadpool = ThreadPool(
            per_thread_init_func=self.create_connection,
            per_thread_close_func=self.close_connection,
            num_threads=num_threads,
            queue_timeout=queue_timeout)
        self._ioloop = ioloop

        # Connection pool for transactions
        self._connection_pool = []
        for i in xrange(tx_connection_pool_size):
            conn = self.create_connection()
            self._connection_pool.append(conn)
        self._waiting_on_connection = deque()

    def create_connection(self):
        """This method is executed in a worker thread.

        Initializes the per-thread state. In this case we create one
        database connection per-thread.
        """
        if self._driver == "psycopg2":
            import psycopg2
            conn = psycopg2.connect(database=self._database,
                                    user=self._user,
                                    password=self._password,
                                    host=self._host)
        elif self._driver == "MySQLdb":
            import MySQLdb
            conn = MySQLdb.connect(db=self._database,
                                   user=self._user,
                                   passwd=self._password,
                                   host=self._host,
                                   port=3306)
        else:
            raise ValueError("Unknown driver %s" % self._driver)
        return conn

    def close_connection(self, conn):
        conn.close()

    def stop(self):
        self._threadpool.stop()
        for conn in self._connection_pool:
            conn.close()

    @async
    def beginTransaction(self, callback):
        """Begins a transaction. Picks up a transaction from the pool
        and passes it to the callback. If none is available, adds the
        callback to `_waiting_on_connection'.
        """
        if self._connection_pool:
            conn = self._connection_pool.pop()
            callback(conn)
        else:
            self._waiting_on_connection.append(callback)

    @async
    def commitTransaction(self, connection, callback):
        self._threadpool.add_task(
            partial(self._commitTransaction, connection, callback))

    def _commitTransaction(self, conn, callback, thread_state=None):
        """Invoked in a worker thread.
        """
        conn.commit()
        self._ioloop.add_callback(
            partial(self._releaseConnectionInvokeCallback, conn, callback))

    @async
    def rollbackTransaction(self, connection, callback):
        self._threadpool.add_task(
            partial(self._rollbackTransaction, connection, callback))

    def _rollbackTransaction(self, conn, callback, thread_state=None):
        """Invoked in a worker thread.
        """
        conn.rollback()
        self._ioloop.add_callback(
            partial(self._releaseConnectionInvokeCallback, conn, callback))

    def _releaseConnectionInvokeCallback(self, conn, callback):
        """Release the connection back in the connection pool and
        invoke the callback. Invokes any waiting callbacks before
        releasing the connection into the pool.
        """
        # First invoke the callback to let the program know we're done
        # with the transaction.
        callback(conn)
        # Now check to see if we have any pending clients. If so pass
        # them the newly released connection.
        if self._waiting_on_connection:
            callback = self._waiting_on_connection.popleft()
            callback(conn)
        else:
            self._connection_pool.append(conn)

    @async
    def runQuery(self, query, args=None, conn=None, callback=None):
        """Send a SELECT query to the database.

        The callback is invoked with all the rows in the result.
        """
        self._threadpool.add_task(partial(self._query, query, args, conn),
                                  callback)

    def _query(self, query, args, conn=None, thread_state=None):
        """This method is called in a worker thread.

        Execute the query and return the result so it can be passed as
        argument to the callback.
        """
        if not conn:
            conn = thread_state
        cursor = conn.cursor()
        cursor.execute(query, args)
        rows = cursor.fetchall()
        cursor.close()
        return rows

    @async
    def runOperation(self, stmt, args=None, conn=None, callback=None):
        """Execute a SQL statement other than a SELECT.

        The statement is committed immediately. The number of rows
        affected by the statement is passed as argument to the
        callback.
        """
        self._threadpool.add_task(partial(self._execute, stmt, args, conn),
                                  callback)

    def _execute(self, stmt, args, conn=None, thread_state=None):
        """This method is called in a worker thread.

        Executes the statement.
        """
        # Check if stmt is a tuple. This can happen when we use map()
        # with adisp to execute multiple statements in parallel.
        if isinstance(stmt, tuple):
            args = stmt[1]
            stmt = stmt[0]
        if not conn:
            conn = thread_state
            should_commit = True
        else:
            should_commit = False
        cursor = conn.cursor()
        cursor.execute(stmt, args)
        if should_commit:
            conn.commit()
        rowcount = cursor.rowcount
        cursor.close()
        return rowcount
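A hypothetical usage sketch, assuming a running Tornado IOLoop, the MySQLdb driver, and that the @async decorator passes an explicit callback through unchanged (the table, query and credentials are placeholders):

import tornado.ioloop

db = Database(driver="MySQLdb", database="test",
              user="app", password="secret")

def on_rows(rows):
    for row in rows:
        print(row)
    tornado.ioloop.IOLoop.instance().stop()

db.runQuery("SELECT id, name FROM users WHERE id = %s", (1,),
            callback=on_rows)
tornado.ioloop.IOLoop.instance().start()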
Example #18
def functional_test():
    r = redis.StrictRedis(host='localhost', port=8323)
    # set/get
    print r.set("key", 'b'*56000)
    print len(r.get("key"))

    # incr
    print r.set("incr_key",0)
    print r.get("incr_key")
    print r.incr('incr_key')
    print r.get("incr_key")

def press_test():
    r = redis.StrictRedis(host='localhost', port=8323)
    for i in range(10000):
        key = 'foo_%d'%i
        r.set(key, 'b'*i)
        if i%1000==0:
            print key, "->", len(r.get(key))

if __name__=="__main__":
    #functional_test()
    # Create thread pool with nums threads
    pool = ThreadPool(32)
    # Add a task into pool
    for n in range(10):
        pool.add_task(functional_test)
        pool.add_task(press_test)
    # Join and destroy all threads
    pool.destroy()
Example #19
def start(baseUrl,seedUrl):
    # clean reffer in reffer.txt
    f = open("reffer.txt","w")
    f.close()

    #seed = Request(base='http://192.168.42.131/dvwa/index.php',url='http://192.168.42.131/dvwa/index.php',method='get')
    seed = request.Request(base=baseUrl,url=seedUrl,timeout=config.conf['connTimeout'],query={},method='get')
    #seed = request.Request(base='http://192.168.42.132/dvwa/',url='http://192.168.42.132/dvwa/',query={},method='get')
    colors.blue('Seed URL: %s\n' % seed._url)
    logfileName = create_logfile(seed._url)
    cookie = getCookie(seed._url)
    
    # begin crawler
    tup = urlparse.urlparse(seed._url)
    netloc = tup.netloc # seed url 
    count = 0
    q = Queue.Queue()
    bf = bloomFilter.BloomFilter(0.001,100000)
    # readreffer from reffer.txt
    '''
    reffer = readReffer()
    reqSet = []
    reqSet.append(seed)
    reqSet.extend(reffer)
    for i in reqSet:
        q.put(i)
        bf.insert(i._url)
    '''
    q.put(seed)
    bf.insert(seed._url)

    nums = config.conf['MaxThread']
    pool = ThreadPool(nums)
    begin = time.time()
    while(not q.empty()):
        req = q.get()
        req._cookies = cookie
        reqs = crawler.crawl(req,tree)

        if req._query != {} and is_tree_full(req._url,tree):
        #if req._query != {}:
            count += 1 
            print 'URL: ',req._BFUrl,'  ', req._source
            pool.add_task(startCheck,req,logfileName)
        

        for x in reqs:
            if not bf.exist(x._BFUrl):
                bf.insert(x._BFUrl)
                q.put(x)


    pool.destroy()
    end = time.time()
    
    f = open(logfileName, 'r')
    colors.blue('\nScan results:\n\n')
    x = f.read()
    colors.green(x)
    colors.blue('\nScan results saved to "%s"\n\n' % (os.getcwd() + '/' + logfileName))
    cost = end - begin
    print "Elapsed time: %f seconds" % cost
    print "Number of URLs tested:", count
    f.close()
    f = open(logfileName,'a')
    f.write(advice())
    f.close()
    os.system('ps -ef | grep -v grep | grep proxy.py | awk \'{print $2}\'|xargs kill -9')
Example #20
def lagouScrapy(self):
    tp = ThreadPool(18)
    for i in range(200):
        tp.add_task(self.lagou, i)
    tp.destroy()
Example #21
class Tasks(Codes):

    def __init__(self):
        self.operate = Operate()
        self._api = OpenApi()
        self._http = HttpClient.getInstance()
        self._pool = ThreadPool(5)    # initialize a pool of 5 worker threads
        print("Task class initialized")

    def getAllAdmin(self):
        print("All admins: %s" % variable.Admins)
        return variable.Admins

    def getAllGroup(self):
        print("All watched groups: %s" % variable.Groups)
        return variable.Groups

    def addAdmin(self, qq):
        return self.operate.addAdmin(qq)

    def delAdmin(self, qq):
        return self.operate.delAdmin(qq)

    def isAdmin(self, qq):
        return self.operate.isAdmin(qq)

    def addGroup(self, qq):
        return self.operate.addGroup(qq)

    def delGroup(self, qq):
        return self.operate.delGroup(qq)

    def inGroup(self, qq):
        # print("inGroup: %s", qq)
        return self.operate.inGroup(qq)

    def addAsk(self, question, answer):
        return self.operate.addAsk(question, answer)

    def delAsk(self, Id):
        return self.operate.delAsk(Id)

    def getAsk(self, content):
        return self.operate.getAsk(content)

    def end(self):
        self._pool.destroy()

    def uin_to_qq(self, uin):
        if uin in variable.UsersQQ:
            return variable.UsersQQ.get(uin)
        print("获取qq %s %s %s", uin, variable.Vfwebqq, variable.Referer)
        html = self._http.get(variable.Get_friend_uin2.format(uin, self.bytesToStr(variable.Vfwebqq)), referer = variable.Referer)
        print("uin_to_qq: %s", html)
        try:
            result = json.loads(self.bytesToStr(html))
            if result.get("retcode") != 0:
                return ""
            qq = result.get("result").get("account")
            if qq:
                variable.UsersQQ[uin] = str(qq)
                return str(qq)
        except Exception as e:
            print(e)
            return ""

    def sendMsg(self, *args, **kwargs):
        print("replying to message")
        url = kwargs.get("url")
        data = kwargs.get("data")
        # print(data)
        referer = kwargs.get("referer")
        result = self._http.post(url = url, data = data, referer = referer)
        print("reply result: %s" % result)

    def otherMsg(self, content, to, url, uin):
        if content:
            html = self._http.get(url = variable.RobotUrl.format(quote(content), uin))
            html = html.replace("\\n", "").replace("\n", "")
            html = self._api.parse(html)
            html = self._api.getResult()
            if html:
                print("智能回复: ", html)
                data = {'r' : variable.Msg_Data.format(to, uin, html, variable.Clientid, variable.Msgid, variable.Psessionid)}
                print(data)
                self._pool.add_task(callback = self.sendMsg, url = url, data = data, referer = variable.Referer)

    def analyze(self, qq, uin, content, iseq = None):
        print("start parsing the message")
        if iseq:
            print("message comes from a group")
            to = "group_uin"
            url = variable.Send_qun_msg2
        else:
            print("message comes from a friend")
            to = "to"
            url = variable.Send_buddy_msg2
        # sender is an admin
        if self.isAdmin(qq) and content in ("开启机器人", "关闭机器人", "退出"):  # "start bot", "stop bot", "quit"
            # parse the admin command
            _msg = ""
            print("message from an admin")
            if content == "开启机器人":
                variable.State = True
                print("bot started")
                _msg = "bot started"
            elif content == "关闭机器人":
                variable.State = False
                print("bot stopped")
                _msg = "bot stopped"
            elif content == "退出":
                variable.State = False
                variable.Exit = True
                print("bot exited")
                _msg = "bot exited"
            if _msg:
                data = {'r' : variable.Msg_Data.format(to, uin, _msg, variable.Clientid, variable.Msgid, variable.Psessionid)}
                self._pool.add_task(callback = self.sendMsg, url = url, data = data, referer = variable.Referer)
            return
        # a private message sent to me (check whether it is a command)
        result = variable.Command.findall(content)
        if result and to == "to":
            ver, msg = result[0]
            _msg = ""
            if ver == variable.AddAdmin:
                # add an admin
                print("adding admin")
                if self.addAdmin(msg):
                    _msg = "Add admin: " + msg + " succeeded"
                else:
                    _msg = "Add admin: " + msg + " failed"
            elif ver == variable.DelAdmin:
                # delete an admin
                print("deleting admin")
                if self.delAdmin(msg):
                    _msg = "Delete admin: " + msg + " succeeded"
                else:
                    _msg = "Delete admin: " + msg + " failed"
            elif ver == variable.AddAttention:
                # add a watched group number
                print("adding watched group")
                if self.addGroup(msg):
                    _msg = "Add watched group: " + msg + " succeeded"
                else:
                    _msg = "Add watched group: " + msg + " failed"
            elif ver == variable.DelAttention:
                # delete a watched group number
                print("deleting watched group")
                if self.delGroup(msg):
                    _msg = "Delete watched group: " + msg + " succeeded"
                else:
                    _msg = "Delete watched group: " + msg + " failed"
            if _msg:
                data = {'r' : variable.Msg_Data.format(to, uin, _msg, variable.Clientid, variable.Msgid, variable.Psessionid)}
                self._pool.add_task(callback = self.sendMsg, url = url, data = data, referer = variable.Referer)
            return
        if content.startswith("#") and len(content) > 2 and self.inGroup(iseq):
            # starts with '#', longer than 2 chars, and sent from a watched group
            i = 0
            content = content[1:].strip()
            for w in content:
                if w in variable.Filter:
                    print("found filtered word: ", w)
                    i += 1
            if i == 0:
                self._pool.add_task(callback = self.otherMsg, content = content, to = to, url = url, uin = uin)
                return
            else:
                data = {'r' : variable.Msg_Data.format(to, uin, "what are you doing! f**k", variable.Clientid, variable.Msgid, variable.Psessionid)}
                self._pool.add_task(callback = self.sendMsg, url = url, data = data, referer = variable.Referer)
                return
        if to == "to" and len(content) > 2:
            # private message
            self._pool.add_task(callback = self.otherMsg, content = content.strip(), to = to, url = url, uin = uin)
            return

    def delwith(self, fuin, suin, iseq, content):
        '''
        # fuin: the message sender
        # suin: qq of the group-message sender
        # iseq: group number
        # content: message body
        '''
        print("%s %s %s %s" % (fuin, suin, iseq, content))
        # group shared-file message
        if content.startswith("<?xml"):
            print("found a shared file")
        # resolve the sender's qq
        if iseq and suin:
            qq = self.uin_to_qq(suin)
        else:
            qq = self.uin_to_qq(fuin)
        print("qq: %s" % qq)
        self.analyze(qq, fuin, content, iseq)
Example #22
        # (truncated in the source: this code begins inside an outer download loop)
        imageUrls = None  # ensure the name is bound even if every retry fails
        for i in range(1, 10):
            try:
                imageUrls = get_image_url(restUrl)
                break
            except Exception, e:
                print 'get restUrl error times' + str(i) + ': %s' % (e,)
                logging.error('get restUrl error times' + str(i) + ': %s' % (e,))
                time.sleep(10)

        if imageUrls is None or len(imageUrls) == 0:
            print 'get imageUrls error %s' % restUrl
            logging.error('get imageUrls error %s' % restUrl)
            continue

        # logging.debug("progress: %d of %d, %s , %d images", progress, total, reviewName, len(urls))  # 进度

        count = 0
        for imageUrl in imageUrls:
            # download_image(imageUrl, LOCAL_DIR + reviewName,
            # imageUrl[imageUrl.rfind("/") + 1:] + ".jpg")
            pool.add_task(download_image, imageUrl, LOCAL_DIR + reviewName,
                          imageUrl[imageUrl.rfind("/") + 1:], SLEEP_SECONDS)  # download images on worker threads
            count += 1
            # logging.debug("task added: %d", count)
            # logging.debug("finished : %s", reviewName)
            # print "finished : %s" % ( reviewName)
            #logging.info("finished : %s" % ( reviewName))
    pool.destroy()

Example #23
def main_fresh(dbOrNot):
    """
    Monitor URLs using fresh data.
    """
    # set value for oldUrlObjDic dict.
    f = open("./urgentCriterion_new")
    while 1:
        string = f.readline().strip()
        if not string:
            break
        arr = string.split(",")
        #URL Object Format: URL(length, md5)
        oldUrlObjDic[arr[0]] = URL(int(arr[1]), arr[2])
    f.close()

    f = open("./urgentAccessErrorURLs")
    while 1:
        string = f.readline().strip()
        if not string:
            break
        aeURLs.append(string)
    f.close()

    #lxw_tp
    #threadingNum = threading.Semaphore(THREADS_NUM)
    tp = ThreadPool(THREADS_NUM)

    threads = []
    urlCount = 0
    # monitor each url in .urls file
    f = open("./.urgentURLS")
    while 1:
        url = f.readline().strip()
        if not url:
            break

        #lxw_tp
        #Multiple Thread: Deal with "one url by one single thread".
        #mt = MyThread(monitor, (url,), threadingNum)
        tp.add_task(monitor, url)
        #mt.start()
        #threads.append(mt)

        urlCount += 1
    f.close()

    #lxw_tp
    tp.destroy()
    #for thread in threads:
    #    thread.start()

    """
    while 1:
        over = True
        for thread in threads:
            if thread.isAlive():
                if not thread.isTimedOut():     # not "Timed Out".
                    over = False
                else:
                    urgentMyUtils.writeLog("lxw_Timed Out", thread.getURL(), "")
        if over:
            break
    """

    if aeCount > 0:
        allContent = "Monitored {0} sites this run; {1} had access errors. Details:\n\n{2}".format(urlCount, aeCount, aeContent)
        urgentMyUtils.sendEmail(aeSubject, allContent)
    if uwCount > 0:
        allContent = "Monitored {0} sites this run; {1} showed updates. Details:\n\n{2}".format(urlCount, uwCount, uwContent)
        urgentMyUtils.sendEmail(uwSubject, allContent)

    #Update Criterion file.
    f = open("./urgentCriterion_new", "w")
    for url in newUrlObjDic.keys():
        f.write("{0},{1},{2}\n".format(url, newUrlObjDic[url].length, newUrlObjDic[url].getMD5Str()))
    f.close()

    if dbOrNot:
        #update criterion in database
        urgentMyUtils.updateCriterion(newUrlObjDic)

    #Update accessErrorURLs file.
    f = open("./urgentAccessErrorURLs", "w")
    for url in aeURLs:
        f.write(url + "\n")
    f.close()
Example #24
class Server(object):
    """
    A server to host MPI, will delegate all of its calls to the active simulation kernel.
    """
    # Don't forward some of the internally provided functions, but simply raise an AttributeError
    noforward = frozenset(["__str__", "__getstate__", "__setstate__", "__repr__"])

    def __init__(self, name, totalSize):
        """
        Constructor

        :param name: the name of the server, used for addressing (in MPI terms, this is the rank)
        :param totalSize: the total size of the network in which the model lives
        """
        self.name = name
        self.kernel = None
        self.size = totalSize
        self.proxies = [MPIRedirect(i) for i in range(totalSize)]
        from MPIRedirect import LocalRedirect
        self.proxies[name] = LocalRedirect(self)
        self.queuedMessages = []
        self.queuedTime = None
        if totalSize > 1:
            self.threadpool = ThreadPool(2)
            self.bootMPI()

    def getProxy(self, rank):
        """
        Get a proxy to a specified rank. 
        
        This rank is allowed to be the local server, in which case a local shortcut is created.

        :param rank: the rank to return a proxy to, should be an int
        :returns: proxy to the server, either of type MPIRedirect or LocalRedirect
        """
        return self.proxies[rank]

    def checkLoadCheckpoint(self, name, gvt):
        """
        Reconstruct the server from a checkpoint.

        :param name: name of the checkpoint
        :param gvt: the GVT to restore to
        :returns: bool -- whether or not the checkpoint was successfully loaded
        """
        rank = self.name
        #assert debug("Accessing file " + str("%s_%s_%s.pdc" % (name, gvt, rank)))
        try:
            infile = open("%s_%s_%s.pdc" % (name, gvt, rank), 'rb')  # binary mode for pickle
            pickle.load(infile)
            return True
        except KeyboardInterrupt:
            # If the user interrupts, still reraise
            raise
        except Exception as e:
            # Something went wrong
            print("Error found: " + str(e))
            return False
        
    def loadCheckpoint(self, name, gvt):
        """
        Reconstruct the server from a checkpoint.

        :param name: name of the checkpoint
        :param gvt: the GVT to restore to
        """
        rank = self.name
        #assert debug("Accessing file " + str("%s_%s_%s.pdc" % (name, gvt, rank)))
        infile = open("%s_%s_%s.pdc" % (name, gvt, rank), 'rb')  # binary mode for pickle
        self.kernel = pickle.load(infile)
        self.kernel.server = self
        from MPIRedirect import LocalRedirect
        self.proxies[self.name] = LocalRedirect(self)
        infile.close()
        #assert debug("Closing file")
        self.kernel.loadCheckpoint()

    def setPickledData(self, pickled_data):
        """
        Set the pickled representation of the model.

        For use on the controller itself, as this doesn't need to unpickle the model.

        :param pickled_data: the pickled model
        """
        self.kernel.pickledModel = pickled_data

    def prepare(self, scheduler):
        """
        Prepare the server to receive the complete model over MPI

        :param scheduler: the scheduler to use
        """
        data = middleware.COMM_WORLD.bcast(None, root=0)
        if data is not None:
            self.saveAndProcessModel(data, scheduler)
            middleware.COMM_WORLD.barrier()

    def saveAndProcessModel(self, pickledModel, scheduler):
        """
        Receive the model and set it on the server, but also saves it for further reinitialisation.

        :param pickledModel: pickled representation of the model
        :param scheduler: the scheduler to use
        """
        self.sendModel(pickle.loads(pickledModel), scheduler)
        self.kernel.pickledModel = pickledModel

    def getName(self):
        """
        Returns the name of the server

        Is practically useless, since the server is previously addressed using its name. This does have a use as a ping function though.
        """
        # Actually more of a ping function...
        return self.name

    # All calls to this server are likely to be forwarded to the currently
    #  active simulation kernel, so provide an easy forwarder
    def __getattr__(self, name):
        """
        Remote calls happen on the server object, though it is different from the simulation kernel itself. Therefore, forward the actual function call to the correct kernel.

        :param name: the name of the method to call
        :returns: requested attribute
        """
        # For accesses that are actually meant for the currently running kernel
        if name in Server.noforward:
            raise AttributeError()
        return getattr(self.kernel, name)

    def processMPI(self, data, comm, remote):
        """
        Process an incoming MPI message and reply to it if necessary

        :param data: the data that was received
        :param comm: the MPI COMM object
        :param remote: the location from where the message was received
        """
        # Receiving a new request
        resendTag = data[0]
        function = data[1]
        args = data[2]
        kwargs = data[3]
        result = getattr(self, function)(*args, **kwargs)
        if resendTag is not None:
            if result is None:
                result = 0
            comm.send(result, dest=remote, tag=resendTag)

    def listenMPI(self):
        """
        Listen for incoming MPI messages and process them as soon as they are received
        """
        comm = middleware.COMM_WORLD
        status = middleware.MPI.Status()
        while 1:
            #assert debug("[" + str(comm.Get_rank()) + "]Listening to remote " + str(middleware.MPI.ANY_SOURCE) + " -- " + str(middleware.MPI.ANY_TAG))
            # First check if a message is present, otherwise we would have to do busy polling
            data = comm.recv(source=middleware.MPI.ANY_SOURCE, tag=middleware.MPI.ANY_TAG, status=status)
            tag = status.Get_tag()
            #assert debug("Got data from " + str(status.Get_source()) + " (" + str(status.Get_tag()) + "): " + str(data))
            if tag == 0:
                # Flush all waiters, as we will never receive an answer when we close the receiver...
                self.finishWaitingPool()
                break
            elif tag == 1:
                # NOTE Go back to listening ASAP, so do the processing on another thread
                if data[1] == "receive" or data[1] == "receiveAntiMessages":
                    self.threadpool.add_task(Server.processMPI, self, list(data), comm, status.Get_source())
                else:
                    # Normal 'control' commands are immediately executed, as they would otherwise have the potential to deadlock the node
                    threading.Thread(target=Server.processMPI, args=[self, list(data), comm, status.Get_source()]).start()
            else:
                # Receiving an answer to a previous request
                try:
                    event = MPIRedirect.waiting[tag]
                    MPIRedirect.waiting[tag] = data
                    event.set()
                except KeyError:
                    # Probably processed elsewhere already, just skip
                    pass
                except AttributeError:
                    # Key was already set elsewhere
                    pass
        
    def finishWaitingPool(self):
        """
        Stop the complete MPI request queue from blocking, used when stopping simulation is necessary while requests are still outstanding.
        """
        for i in MPIRedirect.waiting:
            try:
                i.set()
            except AttributeError:
                # It was not a lock...
                pass
            except KeyError:
                # It was deleted in the meantime
                pass

    def bootMPI(self):
        """
        Boot the MPI receivers when necessary, on another thread to prevent blocking
        """
        if self.size > 1:
            listener = threading.Thread(target=Server.listenMPI, args=[self])
            # Make sure that this is a daemon on the controller, as otherwise this thread will prevent the atexit from stopping
            # Though on every other node this should NOT be a daemon, as this is the only part still running

            if middleware.COMM_WORLD.Get_rank() == 0:
                listener.daemon = True
            listener.start()

    def sendModel(self, data, scheduler):
        """
        Receive a complete model and set it.

        :param data: a tuple containing the model, the model_ids dictionary, scheduler name, and a flag for whether or not the model was flattened to allow pickling
        :param scheduler: the scheduler to use
        """
        model, model_ids, flattened = data
        if self.name == 0:
            self.kernel = Controller(self.name, model, self)
        else:
            self.kernel = BaseSimulator(self.name, model, self)
        self.kernel.sendModel(model, model_ids, scheduler, flattened)

    def finish(self):
        """
        Stop the currently running simulation
        """
        sim = self.kernel
        with sim.simlock:
            # Shut down all threads on the topmost simulator
            sim.finished = True
            sim.shouldrun.set()
            self.finishWaitingPool()

            # Wait until they are done
            sim.simFinish.wait()

    def queueMessage(self, time, model_id, action):
        """
        Queue a delayed action from being sent, to make it possible to batch them.
        
        Will raise an exception if previous messages from a different time were not yet flushed!
        This flushing is not done automatically, as otherwise the data would be received at a further timestep
        which causes problems with the GVT algorithm.

        :param time: the time at which the action happens
        :param model_id: the model_id that executed the action
        :param action: the action to execute (as a string)
        """
        if self.queuedTime is None:
            self.queuedTime = time
        elif time != self.queuedTime:
            raise DEVSException("Queued message at wrong time! Probably forgot a flush")
        self.queuedMessages.append([model_id, action])

    def flushQueuedMessages(self):
        """
        Flush all queued messages to the controller. This will block until all of them are queued.
        It is required to flush all messages right after all of them happened and this should happen within the critical section!
        """
        if self.queuedTime is not None:
            self.getProxy(0).massDelayedActions(self.queuedTime, self.queuedMessages)
            self.queuedMessages = []
            self.queuedTime = None
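One detail worth noting in listenMPI() above: receive-type messages are queued as add_task(Server.processMPI, self, ...), i.e. the plain function plus an explicit instance, which enqueues the same call as passing the bound method. A hypothetical side-by-side (pool, server, data, comm and src stand in for the real objects):

# Both spellings run server.processMPI(data, comm, src) on a worker thread:
pool.add_task(Server.processMPI, server, data, comm, src)  # function + explicit self
pool.add_task(server.processMPI, data, comm, src)          # bound method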
Example #26
class server:
    """docstring for server."""
    def __init__(self, host, port):
        self.max_threads = 8
        self.host = host
        self.port = port
        self.chatrooms = {}  # room_name -> room
        self.chatroom_ids = {}  # id -> name
        self.client_ids = {}
        self.setup_socket()
        self.tp = ThreadPool(self.max_threads)
        self.accept_connections()

    def setup_socket(self):
        '''
        Sets up a tcp socket that listens on the host and port given
        '''
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.bind((self.host, int(self.port)))
        self.s.listen(50)

    def accept_connections(self):
        while True:
            client, address = self.s.accept()
            print("in accept, client: " + str(client))
            print("in accept, address: " + str(address))
            self.client_thread(client, address)
            print("loops in accept connection")

    def client_thread(self, client, addr):
        kargs = {"client": client, "address": addr}
        self.tp.add_task(self.handle, **kargs)
        # t = threading.Thread(target=self.handle, kwargs=kargs).start()

    def handle(self, **kwargs):
        client = kwargs["client"]  # client ip
        address = kwargs["address"]
        data = client.recv(1024)
        data = data.decode("utf-8")
        print(data)
        if data[:5] == "HELO ":
            text = data[5:]
            self.handle_helo(client, text)
        elif data[:len("KILL_SERVICE")] == "KILL_SERVICE":
            self.handle_kill()
        elif data[:4] == "join":
            message = data.split("\n")
            # strip the label prefix from each field (widths inferred from the labels)
            message[0] = message[0][14:]  # chatroom name
            message[1] = message[1][10:]  # client ip
            message[2] = message[2][5:]  # port
            message[3] = message[3][13:]  # client name
            message = [m.strip() for m in message]
            print("message received: ")
            for field in message:
                print(field)
            self.handle_join_room(message[0], message[3], client, address)
        elif data[:len("DISCONNECT: ")] == "DISCONNECT: ":
            self.handle_disconnect()
        else:
            self.handle_other()

    def handle_helo(self, client, text):
        """
        send back required information
        """
        ip = socket.gethostbyname(socket.gethostname())
        reply = "HELO " + text + "IP:" + ip + "\nPort:" + str(self.port) +\
            "\nStudentID:13325878\n"
        client.send(reply.encode('utf-8'))

    def handle_kill(self):
        """
        shuts down service
        """
        os._exit(0)

    def handle_other(self):
        """
        called when we receive an unhandled message
        """
        pass

    def handle_join_room(self,
                         room_name,
                         client_name,
                         client_ip=0,
                         client_port=0):
        if room_name not in self.chatrooms:
            # first join: create the room
            room = Chatroom(room_name)
            self.chatrooms[room_name] = room
            self.chatroom_ids[room.get_id()] = room_name
        else:
            # join the existing room
            room = self.chatrooms[room_name]
        # subscribe exactly once, with the full client details
        room.subscribe(client_name, client_ip, client_port)
        subscribers = room.get_publish_list()
        # client_ip is actually the client socket here (see the call in handle)
        client_ip.send(
            self.success_response(room_name, self.host, self.port,
                                  room.get_id(), id(client_name)).encode())
        self.client_ids[id(client_name)] = client_name
        for i in subscribers:
            # index 1 is assumed to hold the subscriber's socket
            i[1].sendall((str(client_name) + " has joined the room").encode())
            print("sent")

    def success_response(self, room_name0, host, port, room_id, client_id):
        return "JOINED_CHATROOM: " + room_name0 + "\n" +\
        "SERVER_IP: " + str(host) + "\n" +\
        "PORT: " + str(port) + "\n" +\
        "ROOM_REF: " + str(room_id) + "\n" +\
        "JOIN_ID: " + str(client_id)

    def handle_leave_chatroom(self, host, port, room_id, client_name):
        room = self.chatrooms[self.chatroom_ids[room_id]]
        room.unsubscribe(client_name)
        host.send(self.leave_message(room_id, client_name).encode())
        subscribers = room.get_publish_list()
        for i in subscribers:
            i[0].sendall(self.left_message(room_id, client_name).encode())

    def leave_message(self, room_id, client_name):
        # client_ids maps join id -> name, so a reverse lookup is needed
        cid = next((k for k, v in self.client_ids.items() if v == client_name), None)
        return "LEAVE_CHATROOM: " + str(room_id) + "\n" +\
            "JOIN_ID: " + str(cid) + "\n" +\
            "CLIENT_NAME: " + client_name

    def left_message(self, room_id, client_name):
        cid = next((k for k, v in self.client_ids.items() if v == client_name), None)
        return "LEFT_CHATROOM: " + str(room_id) + "\n" +\
            "JOIN_ID: " + str(cid)

    def handle_disconnect(self):
        self.s.close()

    def handle_chat(self, room_id, client_name, msg):
        message = "CHAT: " + str(room_id) + "\n" +\
            "CLIENT_NAME: " + client_name + "\n" +\
            "MESSAGE: " + msg
        room = self.chatrooms[self.chatroom_ids[room_id]]
        subscribers = room.get_publish_list()
        for i in subscribers:
            i[0].sendall(message.encode())
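
# A hedged client-side sketch for the join request this server parses in
# handle(). The lowercase labels and their widths (14/10/5/13 characters) are
# inferred from the slicing above, so treat this as an illustration of the
# expected wire format, not an authoritative client.
def _example_join(host, port, room, name):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((host, int(port)))
    request = ("join_chatroom: " + room + "\n"
               "client_ip: 0\n"
               "port: 0\n"
               "client_name: " + name + "\n")
    sock.send(request.encode("utf-8"))
    # The server replies with JOINED_CHATROOM / SERVER_IP / PORT / ROOM_REF / JOIN_ID
    print(sock.recv(1024).decode("utf-8"))
    return sock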
Beispiel #27
0
class Database:
    """Asynchronous database interface.

    The `driver' argument specifies which database to use. Possible
    values are:

    MySQLdb - for MySQL
    psycopg2 - for Postgres
    """
    def __init__(self,
                 driver=None,
                 database=None, user=None, password=None,
                 host='localhost',
                 ioloop=tornado.ioloop.IOLoop.instance(),
                 num_threads=10,
                 tx_connection_pool_size=5,
                 queue_timeout=1,
                 thread_idle_life=60*60):
        if not driver:
            raise ValueError("Missing 'driver' argument")
        self._driver = driver
        self._database = database
        self._user = user
        self._password = password
        self._host = host
        self._threadpool = ThreadPool(
            per_thread_init_func=self.create_connection,
            per_thread_close_func=self.close_connection,
            num_threads=num_threads,
            queue_timeout=queue_timeout,
            thread_idle_life=thread_idle_life)
        self._ioloop = ioloop

        # Connection pool for transactions
        self._connection_pool = []
        for i in xrange(tx_connection_pool_size):
            conn = self.create_connection()
            self._connection_pool.append(conn)
        self._waiting_on_connection = deque()

    def create_connection(self):
        """This method is executed in a worker thread.

        Initializes the per-thread state. In this case we create one
        database connection per-thread.
        """
        if self._driver == "psycopg2":
            import psycopg2
            conn = psycopg2.connect(database=self._database,
                                    user=self._user,
                                    password=self._password,
                                    host=self._host)
        elif self._driver == "MySQLdb":
            import MySQLdb
            conn = MySQLdb.connect(db=self._database,
                                   user=self._user,
                                   passwd=self._password,
                                   host=self._host,
                                   port=3306)
        else:
            raise ValueError("Unknown driver %s" % self._driver)
        return conn

    def close_connection(self, conn):
        conn.close()

    def stop(self):
        self._threadpool.stop()
        for conn in self._connection_pool:
            conn.close()

    @async
    def beginTransaction(self, callback):
        """Begins a transaction. Picks up a transaction from the pool
        and passes it to the callback. If none is available, adds the
        callback to `_waiting_on_connection'.
        """
        if self._connection_pool:
            conn = self._connection_pool.pop()
            callback(conn)
        else:
            self._waiting_on_connection.append(callback)

    @async
    def commitTransaction(self, connection, callback):
        self._threadpool.add_task(
            partial(self._commitTransaction, connection, callback))

    def _commitTransaction(self, conn, callback, thread_state=None):
        """Invoked in a worker thread.
        """
        conn.commit()
        self._ioloop.add_callback(
            partial(self._releaseConnectionInvokeCallback, conn, callback))

    @async
    def rollbackTransaction(self, connection, callback):
        self._threadpool.add_task(
            partial(self._rollbackTransaction, connection, callback))

    def _rollbackTransaction(self, conn, callback, thread_state=None):
        """Invoked in a worker thread.
        """
        conn.rollback()
        self._ioloop.add_callback(
            partial(self._releaseConnectionInvokeCallback, conn, callback))

    def _releaseConnectionInvokeCallback(self, conn, callback):
        """Release the connection back in the connection pool and
        invoke the callback. Invokes any waiting callbacks before
        releasing the connection into the pool.
        """
        # First invoke the callback to let the program know we're done
        # with the transaction.
        callback(conn)
        # Now check to see if we have any pending clients. If so pass
        # them the newly released connection.
        if self._waiting_on_connection:
            callback = self._waiting_on_connection.popleft()
            callback(conn)
        else:
            self._connection_pool.append(conn)

    @async
    def runQuery(self, query, args=None, conn=None, callback=None):
        """Send a SELECT query to the database.

        The callback is invoked with all the rows in the result.
        """
        self._threadpool.add_task(
            partial(self._query, query, args, conn), callback)

    def _query(self, query, args, conn=None, thread_state=None):
        """This method is called in a worker thread.

        Execute the query and return the result so it can be passed as
        argument to the callback.
        """
        if not conn:
            conn = thread_state
            should_commit = True
        else:
            should_commit = False
        cursor = conn.cursor()
        try:
            cursor.execute(query, args)
            return cursor.fetchall()
        finally:
            if should_commit:
                conn.commit()
            cursor.close()

    @async
    def runOperation(self, stmt, args=None, conn=None, callback=None):
        """Execute a SQL statement other than a SELECT.

        The statement is committed immediately. The number of rows
        affected by the statement is passed as argument to the
        callback.
        """
        self._threadpool.add_task(
            partial(self._execute, stmt, args, conn), callback)

    def _execute(self, stmt, args, conn=None, thread_state=None):
        """This method is called in a worker thread.

        Executes the statement.
        """
        # Check if stmt is a tuple. This can happen when we use map()
        # with adisp to execute multiple statements in parallel.
        if isinstance(stmt, tuple):
            args = stmt[1]
            stmt = stmt[0]
        if not conn:
            conn = thread_state
            should_commit = True
        else:
            should_commit = False
        cursor = conn.cursor()
        try:
            cursor.execute(stmt, args)
            return cursor.rowcount
        finally:
            if should_commit:
                conn.commit()
            cursor.close()

    @async
    def runOperationMany(self, stmt, args=None, conn=None, callback=None):
        """Execute a SQL statement other than a SELECT.

        The statement is committed immediately. The number of rows
        affected by the statement is passed as argument to the
        callback.
        """
        self._threadpool.add_task(
            partial(self._executeMany, stmt, args, conn), callback)

    def _executeMany(self, stmt, args, conn=None, thread_state=None):
        """This method is called in a worker thread.

        Executes the statement.
        """
        # Check if stmt is a tuple. This can happen when we use map()
        # with adisp to execute multiple statements in parallel.
        if isinstance(stmt, tuple):
            args = stmt[1]
            stmt = stmt[0]
        if not conn:
            conn = thread_state
            should_commit = True
        else:
            should_commit = False
        cursor = conn.cursor()
        try:
            cursor.executemany(stmt, args)
            return cursor.rowcount
        finally:
            if should_commit:
                conn.commit()
            cursor.close()
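
# A usage sketch, assuming @async above is adisp's decorator so these methods
# can be driven from an adisp @process coroutine (process is assumed to be
# importable alongside async); the table and credentials are placeholders.
@process
def _example_usage(db):
    rows = yield db.runQuery("SELECT * FROM users WHERE id = %s", (1,))
    print rows
    # Statements bound to a pooled connection are only committed explicitly
    conn = yield db.beginTransaction()
    yield db.runOperation("UPDATE users SET name = %s WHERE id = %s",
                          ("alice", 1), conn=conn)
    yield db.commitTransaction(conn)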
Beispiel #28
0
class Server(object): #cython-remove
    noforward = frozenset(["__str__", "__getstate__", "__setstate__", "__repr__", "simstack"])
    local = frozenset(["setCoupledModel", "finish", "endNestedSim", "nestedSim", "continuePrevSim", "startNestedSim", "announceSim", "announceEnd", "setKernels", "notifyDone"])

    def __init__(self, name, daemon = None):
        self.daemon = daemon
        self.name = str(name)
        self.simstack = [None]
        import logging
        setLogger(str(name), ('localhost', 514), logging.DEBUG)
        MPIRedirect.local = self
        if middleware.USE_MPI:
            self.threadpool = ThreadPool(5)

    def getName(self):
        # Actually more of a ping function...
        return self.name

    # All calls to this server are likely to be forwarded to the currently
    #  active simulation kernel, so provide an easy forwarder
    def __getattr__(self, name):
        # For accesses that are actually meant for the currently running kernel
        if name in Server.noforward:
            raise AttributeError()
        if self.simstack[-1] is None:
            return getattr(self.simstack[-2], name)
        else:
            return getattr(self.simstack[-1], name)

    def setNextLP(self, location, size):
        self.nextLP = getProxy(location)
        self.nextLPid = location
        self.size = size
        for i in self.simstack:
            i.setNextLP(location, size)

    def getNextLP(self):
        return self.nextLPid

    def nestedSim(self):
        self.simstack[-1].nestedlock.acquire()
        # Now the current simulation is halted, ready to add a new simulation

    def endNestedSim(self):
        sim = self.simstack.pop()
        # Clear simulator
        del sim

    def continuePrevSim(self):
        self.simstack[-1].nestedlock.release()

    def getLayer(self):
        return len(self.simstack)

    def startNestedSim(self):
        self.simstack.append(None)

    def announceSim(self, requestname):
        # A new simulation must start, halt ALL simulators
        # Wait for the GVT to stop running
        #NOTE mandatory that this runs on the controller
        # Try to initiate a nested simulation at this level
        if not self.simstack[-1].nestingLock.acquire(False):
            return False
        for i in range(self.size):
            if str(i) == str(requestname):
                # Don't wait for ourselves, as we are still simulating while inside the nested model
                continue
            getProxy(i).nestedSim()
        for i in range(self.size):
            # All cores are locked, initiate a new round
            getProxy(i).startNestedSim()
        return True

    def announceEnd(self, requestname):
        # Prevent GVT algorithm from running in this unstable state
        for i in range(self.size):
            getProxy(i).endNestedSim()
        for i in range(self.size):
            if str(i) == str(requestname):
                # Don't continue with ourselves, since we are actually still running
                continue
            getProxy(i).continuePrevSim()
        # Unlock GVT lock
        self.simstack[-1].nestingLock.release()

    def processMPI(self, data, comm, remote):
        # Receiving a new request
        resendTag = data[0]
        function = data[1]
        kernel = data[2]
        args = data[3]
        kwargs = data[4]
        # Run this simulation at the topmost simulation kernel
        if (function in Server.local):
            func = getattr(self, function)
        else:
            func = getattr(self.simstack[kernel], function)
        result = func(*args, **kwargs)
        if result is None:
            result = 0
        if resendTag is not None:
            assert debug("Sending data to " + str(remote) + " -- " + str(resendTag) + ": " + str(result))
            comm.send(result, dest=remote, tag=resendTag)

    def listenMPI(self):
        # Profiling variant kept for reference: to profile this listener,
        # move the loop below into a profile_listenMPI method and run
        #   import cProfile
        #   cProfile.runctx("self.profile_listenMPI()", locals(), globals())
        comm = COMM_WORLD
        #NOTE could preallocate the memory for the incoming call, though possible threading problems
        lasttime = time.time()
        status = MPI.Status()
        while True:
            assert debug("[" + str(comm.Get_rank()) + "]Listening to remote " + str(MPI.ANY_SOURCE) + " -- " + str(MPI.ANY_TAG))
            # An Iprobe pre-check before the blocking recv is disabled here,
            # as the sleep(0) loop amounts to busy polling:
            # while not comm.Iprobe(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status):
            #     # Yield to other threads
            #     # 0 means we should yield elsewhere, UNLESS no other process is waiting
            #     time.sleep(0)
            data = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
            tag = status.Get_tag()
            assert debug("Got data from " + str(status.Get_source()) + " (" + str(status.Get_tag()) + "): " + str(data))
            if tag == 0:
                # Flush all waiters, as we will never receive an answer when we close the receiver...
                for i in MPIRedirect.waiting:
                    try:
                        i.set()
                    except AttributeError:
                        # It was not a lock...
                        pass
                    except KeyError:
                        # It was deleted in the meantime
                        pass
                break
            elif tag == 1:
                # NOTE Go back to listening ASAP, so do the processing on another thread
                if data[1] == "getTargets":
                    # This should not be queued as it might be called recursively and we want to prevent this
                    threading.Thread(target=Server.processMPI, args=[self, list(data), comm, status.Get_source()]).start()
                else:
                    self.threadpool.add_task(Server.processMPI, self, list(data), comm, status.Get_source())
            else:
                # Receiving an answer to a previous request
                try:
                    event = MPIRedirect.waiting[tag]
                    MPIRedirect.waiting[tag] = data
                    event.set()
                except KeyError:
                    # Probably processed elsewhere already, just skip
                    pass
                except AttributeError:
                    # Key was already set elsewhere
                    pass

    def bootMPI(self):
        if COMM_WORLD.Get_size() > 1:
            listener = threading.Thread(target=Server.listenMPI, args=[self])
            listener.start()

    def setCoupledModel(self, modelCode, parent_location, parent_id, imports = None):
        if isinstance(modelCode, str):
            # Put this here for Python3
            model = None
            if imports is not None:
                exec(imports)
            model = eval(modelCode)
        else:
            model = modelCode
        if not isinstance(model, CoupledDEVS):
            raise DEVSException("Only coupled models are allowed to be distributed")
        model.internal = False
        model.location = self.name
        model.setRootSim(self, self.name)
        if parent_id is not None:
            model.parent = RemoteCDEVS(parent_location)
        else:
            model.parent = None
        if self.name == "0":
            sim = Controller(self.name, model)
        else:
            sim = BaseSimulator(self.name, model)
        self.simstack[-1] = sim
        # Should still fill in all model_ids
        sim.setID()

    def notifyDone(self, name):
        for i in self.locations:
            proxy = getProxy(i)
            makeOneway(proxy, "notifyDone")
            proxy.notifyKernelDone(name)

    def finish(self):
        sim = self.simstack[-1]
        sim.simlock.acquire()
        sim.nestedlock.acquire()
        # Shut down all threads on the topmost simulator
        sim.finished = True
        sim.inmsg.set()
        sim.shouldrun.set()
        if middleware.USE_MPI:
            # It is possible that we are still waiting on a reply
            #  since it was agreed to stop, all messages should be useless
            for i in MPIRedirect.waiting:
                try:
                    i.set()
                except AttributeError:
                    # It was not a lock...
                    pass
                except KeyError:
                    # It was deleted in the meantime
                    pass

        # Wait until they are done
        sim.queueFinish.wait()
        sim.simFinish.wait()
        if middleware.USE_PYRO:
            if len(self.simstack) == 1:
                try:
                    self.daemon.shutdown()
                except:
                    pass
        # Release the simlock, since we are possibly working in one of the layers
        #  which will try to grab the simlock
        sim.nestedlock.release()
        sim.simlock.release()

    def setKernels(self, kernels):
        self.locations = kernels
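
# A small sketch of the __getattr__ forwarding above (the kernel object is
# hypothetical; constructing a real Server also sets up logging and, under
# MPI, a threadpool). Any attribute not in Server.noforward and not found on
# the Server itself is resolved against the top of simstack.
class _FakeKernel(object):
    def getTime(self):
        return 42.0

def _example_forwarding():
    server = Server("demo")
    server.simstack[-1] = _FakeKernel()
    # Resolved via Server.__getattr__ -> simstack[-1].getTime
    return server.getTime()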
Beispiel #29
0
    def run(self):
        pool = ThreadPool(3)
        pool.add_task(self.__listen, [self.__requests])
        pool.add_task(self.__respond, [self.__responses])
        pool.add_task(self.__execute, [self.react, self.__requests, self.__responses])
Beispiel #30
0
# An algorithm node, usually with 2 threads each (messaging, main loop)
def start_node(index):
    amqp_url = 'amqp://*****:*****@localhost:5672/%2F'
    node_identifier = "MulticastNode" + str(index)
    consumer = AlgorithmNode(
        node_identifier,
        amqp_url=amqp_url,
        on_message_callback_debug=on_message_callback_debug)
    consumer.run()


def start_async_node(index):
    amqp_url = 'amqp://*****:*****@localhost:5672/%2F'
    node_identifier = "MulticastNode" + str(index)
    consumer = AsyncNode(node_identifier,
                         amqp_url=amqp_url,
                         on_message_receive_debug=on_message_callback_debug)
    # run() is the public entry point; consumer.__run() would fail due to name mangling
    consumer.run()


if __name__ == '__main__':

    # Kickstart the node workers
    # feel free to introduce random delays like f.e. time.sleep(100) between each start

    LOGGER.info("Booting algorithm simulator for 2 workers")
    pool = ThreadPool(1)
    for i in range(len(pool.workers)):
        pool.add_task(start_async_node, i)
    pool.wait_completion()

Beispiel #31
0
from threadpool import ThreadPool
import redis


def functional_test():
    # NOTE: the snippet was truncated here; the imports, function header and
    # connection setup are reconstructed to match press_test and __main__ below.
    r = redis.StrictRedis(host='localhost', port=8323)

    # set/get
    print r.set("key", 'b' * 56000)
    print len(r.get("key"))

    # incr
    print r.set("incr_key", 0)
    print r.get("incr_key")
    print r.incr('incr_key')
    print r.get("incr_key")


def press_test():
    r = redis.StrictRedis(host='localhost', port=8323)
    for i in range(10000):
        key = 'foo_%d' % i
        r.set(key, 'b' * i)
        if i % 1000 == 0:
            print key, "->", len(r.get(key))


if __name__ == "__main__":
    #functional_test()
    # Create thread pool with nums threads
    pool = ThreadPool(32)
    # Add a task into pool
    for n in range(10):
        pool.add_task(functional_test)
        pool.add_task(press_test)
    # Join and destroy all threads
    pool.destroy()
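
# The snippets in this collection assume a ThreadPool with roughly this
# interface (add_task, wait_completion, destroy). A minimal sketch of such a
# pool, for reference only; the real implementations imported above differ in
# details such as per-thread state and queue timeouts.
from Queue import Queue
from threading import Thread

class _SketchThreadPool(object):
    def __init__(self, num_threads):
        self.tasks = Queue()
        self.workers = [Thread(target=self._work) for _ in range(num_threads)]
        for w in self.workers:
            w.daemon = True
            w.start()

    def _work(self):
        # Each worker pulls tasks forever; task_done feeds wait_completion
        while True:
            func, args, kwargs = self.tasks.get()
            try:
                func(*args, **kwargs)
            finally:
                self.tasks.task_done()

    def add_task(self, func, *args, **kwargs):
        self.tasks.put((func, args, kwargs))

    def wait_completion(self):
        self.tasks.join()

    def destroy(self):
        # Join outstanding work; daemon workers die with the process
        self.tasks.join()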
Beispiel #32
0
class Mailer (object):
    MAX_TRIES = 3
    DELAY_AFTER_FAILURES = 2

    def __init__(self, smtp_server, smtp_port, num_threads, queue_timeout, default_sender, ioloop=tornado.ioloop.IOLoop.current()):
        self.threadpool = ThreadPool(
            poolname='Mailer',
            thread_global_data=self,
            thread_quit_hook=self._quit_smtp,
            num_threads=num_threads,
            queue_timeout=queue_timeout)
        self.smtp_server = smtp_server
        self.smtp_port = smtp_port
        self.default_sender = default_sender
        self.ioloop = ioloop

    def _create_smtp(self):
        """This method is executed in a worker thread.

        Initializes the per-thread state. In this case we create one
        smtp per-thread.
        """
        smtp = smtplib.SMTP(self.smtp_server, self.smtp_port)
        return smtp

    @staticmethod
    def _quit_smtp(global_data, local_data):
        smtp = local_data.smtp
        if smtp is not None:
            smtp.quit()

    def send(self, receivers, subject, body, sender=None, reply_to=None, callback=None):
        self.threadpool.add_task(
            partial(self._send, sender or self.default_sender, receivers, subject, body, reply_to),
            callback
        )

    def _send(self, sender, receivers, subject, body, reply_to=None, global_data=None, local_data=None):
        try:
            for i in range(self.MAX_TRIES, 0, -1):
                log_email.debug('sending: try=%d, to=%s, subj=%s', self.MAX_TRIES - i + 1, receivers, subject)
                try:
                    smtp = local_data.smtp if hasattr(local_data, 'smtp') else None
                    if smtp is None:
                        smtp = global_data._create_smtp()
                        local_data.smtp = smtp
                    msg = MIMEMultipart("alternative")
                    msg["Subject"] = subject
                    if reply_to is not None:
                        msg['reply-to'] = reply_to
                    part1 = MIMEText(body, "plain", "utf-8")
                    msg.attach(part1)

                    smtp.sendmail(sender,
                                  receivers,
                                  msg.as_string().encode('ascii')
                                  )
                    log_email.debug('mail sent successfully')
                    return True
                except smtplib.SMTPException as e:
                    if i == 1:
                        raise e
                    # global_data.quit_smtp()
                    local_data.smtp = None
                    time.sleep(self.DELAY_AFTER_FAILURES)
        except:
            etype, evalue, tb = sys.exc_info()
            log_email.error('can\'t send mail: subject=%s, cause=%s/%s', subject, etype, evalue)
            log_email.debug('email body: %s', body)
            log_email.debug('full stacktrace:\n%s', loglib.TracebackFormatter(tb))
            return False
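
# A usage sketch with placeholder addresses and server settings; assumes a
# running Tornado IOLoop so the pool can hand the result back to the callback.
def _example_send():
    mailer = Mailer(smtp_server="localhost", smtp_port=25,
                    num_threads=2, queue_timeout=1,
                    default_sender="noreply@example.com")

    def on_sent(ok):
        # ok is True on success, False if all MAX_TRIES attempts failed
        print('sent' if ok else 'failed')

    mailer.send(["user@example.com"], "subject", "body", callback=on_sent)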
Beispiel #33
0
class Scheduler(object):
    """ job scheduler """
    def __init__(self, total_resources, job_tree):
        """" constructor """
        self._total_resources = total_resources
        if not isinstance(self._total_resources, int):
            raise "total_resources is NOT an int"
        if self._total_resources < 1:
            raise "no resources available"
        self._resources_available = total_resources
        self._job_tree = job_tree
        if not isinstance(self._job_tree, list):
            raise "job_tree is NOT a list"
        # thread pool for producer/consumer
        self._tp_prod_cons = ThreadPool(2)
        # semaphores
        self._free_res_sem = None
        self._used_res_sem = None
        # queue for exchange
        self._job_buffer = Queue()
        # thread pool for the resource management
        self._tp_resources = ThreadPool(total_resources)
        # thread pool for the synchronous management of messages
        self._msg_manager = ThreadPool(1)
        # descriptions
        self._results = ["NOT_TESTED", "SCHEDULED", "RUNNING", "OK", "FAILED", "SKIPPED"]

    def run(self):
        """ start the scheduler """
        # semaphore managed by the producer
        self._free_res_sem = Semaphore(self._total_resources)
        # semaphore managed by the consumer
        self._used_res_sem = Semaphore(0)
        # threads creation for producer/consumer
        self._tp_prod_cons.add_task(func=self.__producer)
        self._tp_prod_cons.add_task(func=self.__consumer)
        self._tp_prod_cons.wait_completion()
        self._job_buffer.join()

    def __print_msg_private(self, msg):
        print "%s --> %s" % (dt.datetime.now(), msg)

    def __print_msg(self, msg):
        self._msg_manager.add_task(self.__print_msg_private, msg)

    def __job_run(self, job):
        start_time = dt.datetime.now()
        five_sec = timedelta(seconds=5)

        self.__print_msg("     Job: %s -- Start time: %s" % (job.name, start_time))
        while True:
            sleep(1)
            if dt.datetime.now() >= start_time + five_sec:
                break
        self.__print_msg("     Job: %s -- End time: %s" % (job.name, dt.datetime.now()))
        #-------------------
        self._job_buffer.task_done()
        for _ in range(job.resources):
            self._used_res_sem.acquire()
            self._resources_available = self._resources_available + 1
            self._free_res_sem.release()
        # ----------------
        # get the result of the test on this job
        # FOR TEST: hard-code the outcome (Test_010 always fails)
        if job.name == "Test_010":
            job.status = JobState.TEST_FAIL
        else:
            job.status = JobState.TEST_OK
        self.__print_msg("     Job: %s executed with result -> %s." % \
        (job.name, self._results[job.status]))
        self.__print_msg("     Job: %s resources released -> %d." % \
        (job.name, job.resources))
        self.__print_msg("     Job: Resources available now: %d." % \
        (self._resources_available))
        # if the test fails, then put all the job's children in the TEST_SKIPPED state
        if job.status == JobState.TEST_FAIL:
            self.__skip_children(job)

    def __get_next_job(self, job):
        next_job = None
        # if the parent job has not finished the test then skip
        if job.parent:
            if job.parent.status != JobState.TEST_OK:
                return next_job
        if job.status == JobState.NOT_TESTED and \
            job.resources <= self._resources_available:
            next_job = job
        if next_job is None and job.children:
            next_job = self.__get_next_job(job.children[0])
        if next_job is None and job.next:
            next_job = self.__get_next_job(job.next)
        return next_job

    def __job_not_scheduled(self, job):
        job_not_scheduled = (job.status == JobState.NOT_TESTED)
        if not job_not_scheduled and job.children:
            job_not_scheduled = self.__job_not_scheduled(job.children[0])
        if not job_not_scheduled and job.next:
            job_not_scheduled = self.__job_not_scheduled(job.next)
        return job_not_scheduled

    def __producer(self):
        """ get the next job and send it to the buffer """
        stop_producer = False
        while True:
            # if there is at least one resource available
            if self._resources_available > 0:
                # search the next job
                next_job = self.__get_next_job(self._job_tree[0])
                if next_job:
                    self.__print_msg("Producer: scheduled the job -> %s resources needed: %d" % \
                    (next_job.name, next_job.resources))
                    next_job.status = JobState.SCHEDULED
                    for _ in range(next_job.resources):
                        self._free_res_sem.acquire()
                        self._resources_available = self._resources_available - 1
                        self._used_res_sem.release()
                    self.__print_msg("Producer: Resources still available: %d." % \
                    (self._resources_available))
                    # put the job in the buffer
                    self._job_buffer.put(next_job)
                else:
                    # are there jobs not tested yet? If not, then stop the Producer
                    if not self.__job_not_scheduled(self._job_tree[0]):
                        stop_producer = True
            if stop_producer:
                self.__print_msg("Producer: finished.")
                break
            sleep(1)

    def __job_not_executed(self, job):
        job_not_executed = (job.status < JobState.TEST_OK)
        if not job_not_executed and job.children:
            job_not_executed = self.__job_not_executed(job.children[0])
        if not job_not_executed and job.next:
            job_not_executed = self.__job_not_executed(job.next)
        return job_not_executed

    def __skip_children(self, parent_job):
        for child_job in parent_job.children:
            child_job.status = JobState.TEST_SKIPPED
            self.__print_msg("     Job: %s marked as %s." % \
            (child_job.name, self._results[child_job.status]))
            self.__skip_children(child_job)

    def __consumer(self):
        """ get the next scheduled job from the buffer and execute it """
        stop_consumer = False
        while True:
            if not self._job_buffer.empty():
                # get the job from the buffer
                job = self._job_buffer.get()
                job.status = JobState.RUNNING
                self.__print_msg("Consumer: the job -> %s is sent to execution." % (job.name))
                # add the job to the thread pool
                self._tp_resources.add_task(self.__job_run, job)
            else:
                # are all the jobs tested? If yes, then stop the Consumer
                if not self.__job_not_executed(self._job_tree[0]):
                    stop_consumer = True
            if stop_consumer:
                self.__print_msg("Consumer: finished.")
                break
            sleep(1)
        self._tp_resources.wait_completion()
        self._msg_manager.wait_completion()
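
# A usage sketch with a hypothetical Job type; the real job objects only need
# the attributes the scheduler reads: name, resources, status, parent,
# children and next.
class _Job(object):
    def __init__(self, name, resources, parent=None):
        self.name = name
        self.resources = resources
        self.status = JobState.NOT_TESTED
        self.parent = parent
        self.children = []
        self.next = None

def _example_run():
    root = _Job("Test_000", resources=1)
    child = _Job("Test_010", resources=2, parent=root)
    root.children.append(child)
    # Two resources total; Test_010 fails (see __job_run) and skips its children
    Scheduler(2, [root]).run()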
Beispiel #34
0
#!/usr/bin/env python
# coding: utf-8

from threadpool import ThreadPool
import time


def do_work(*args, **kwds):
    num = kwds['id']
    my_num = 0
    while my_num < 10:
        print 'num:%d' % num
        print 'do some thing:%d' % my_num
        my_num = my_num + 1
        time.sleep(num)


# Create thread pool with nums threads
pool = ThreadPool(5)
# Add a task into pool
pool.add_task(do_work, None, id=1)
pool.add_task(do_work, None, id=2)
pool.add_task(do_work, None, id=3)
# Join and destroy all threads
pool.destroy()
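
# Note: the None passed to add_task above is presumably this ThreadPool's
# positional-args placeholder; do_work only reads the id keyword and sleeps
# `id` seconds per iteration, so the three tasks interleave at different rates.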