def test():
    get_page('http://news.qq.com', 'gb2312', 10).addCallback(
        lambda cbv: (util.println(cbv[0], cbv[1][1500:2000].encode('utf-8')),
                     reactor.stop())
    ).addErrback(
        lambda cbv: (util.println(traceback.print_tb(cbv.tb), cbv.value),
                     reactor.stop()))
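# Most of the snippets below call a println helper; in the Twisted-based
# examples it is imported from twisted.python.util. As a rough sketch (not the
# authoritative source), it behaves like a print that joins its arguments with
# spaces and appends a newline:
import sys


def println(*a):
    # write all arguments space-separated, followed by a newline
    sys.stdout.write(' '.join(map(str, a)) + '\n')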
def calculateForOneInput(self, input1, input2):
    self.neuralNetwork[0].output = 1       # B1
    self.neuralNetwork[1].output = input2  # I2
    self.neuralNetwork[2].output = input1  # I1
    # The hidden layer
    self.neuralNetwork[3].output = 1
    # Calculations for H2
    self.neuralNetwork[4].sum = (
        (self.neuralNetwork[0].output * self.weights[0]) +
        (self.neuralNetwork[1].output * self.weights[2]) +
        (self.neuralNetwork[2].output * self.weights[4]))
    self.neuralNetwork[4].output = self.sigmoid(self.neuralNetwork[4].sum)
    # Calculations for H1
    self.neuralNetwork[5].sum = (
        (self.neuralNetwork[0].output * self.weights[1]) +
        (self.neuralNetwork[1].output * self.weights[3]) +
        (self.neuralNetwork[2].output * self.weights[5]))
    self.neuralNetwork[5].output = self.sigmoid(self.neuralNetwork[5].sum)
    # Calculations for O1
    self.neuralNetwork[6].sum = (
        (self.neuralNetwork[3].output * self.weights[6]) +
        (self.neuralNetwork[4].output * self.weights[7]) +
        (self.neuralNetwork[5].output * self.weights[8]))
    self.neuralNetwork[6].output = self.sigmoid(self.neuralNetwork[6].sum)
    # Even with a massive training loop the outputs will never be whole
    # numbers on their own, so round to get a clean 0/1.
    print("The output is: " + str(int(round(self.neuralNetwork[6].output))))
    println()
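# The forward pass above calls self.sigmoid(); a minimal sketch of such a
# method, assuming the standard logistic activation (the class name and body
# here are illustrative, not taken from the original project):
import math


class _NetworkSketch(object):
    def sigmoid(self, value):
        # squash any real-valued sum into the (0, 1) range
        return 1.0 / (1.0 + math.exp(-value))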
def xmlrpc_GetBalance(self, request, code):
    """ Get balance by card code """
    println('Call GetBalance ' + repr(request.getClientIP()) + ' card ' + repr(code))
    card = yield self.cardscollection.find_one({"code": code})
    res = card['balance']
    defer.returnValue(res)
def update_tasks_list(self, tasks):
    sys.stdout.write('Updating task list...')
    JolicloudRestoreUtilityBase.update_tasks_list(self, tasks)
    for t in tasks:
        t['disabled'] = True
        if hasattr(JolicloudRestoreUtilityBase, '_task_%s' % t['task']):
            t['disabled'] = False
    println('Done')
    self.run_next_task()
def run_next_task(self):
    if self._current_task < len(self._tasks):
        current_task = self._tasks[self._current_task]
        if not current_task['disabled'] and hasattr(JolicloudRestoreUtilityBase, '_task_%s' % current_task['task']):
            println('Executing task %d out of %d: %s' % (
                self._current_task + 1,
                len(self._tasks),
                current_task['description']
            ))
    JolicloudRestoreUtilityBase.run_next_task(self)
def _test():
    # presumably run via defer.inlineCallbacks, since it yields Deferreds
    agent = MachineConnector(reactor)
    foo = yield agent.process_list()
    println(foo)
    for pid in foo:
        try:
            x = yield Process(pid, agent).get_connections()
            if not x:
                continue
            println(x)
        except Exception:
            traceback.print_exc()
def xmlrpc_SetBalance(self, request, code, balance):
    """ Set balance by card code """
    println('Call SetBalance ' + repr(request.getClientIP()) + ' card ' + repr(code) + ' balance ' + repr(balance))
    # wait for the update to complete before reporting success
    yield self.cardscollection.find_one_and_update(
        {"code": code},
        {"$set": {"balance": balance}}
    )
    defer.returnValue(True)
def get_files(dir_p, delete_file):
    file_list = os.listdir(dir_p)
    for count, file_child in enumerate(file_list):
        dir_1 = dir_p + "/" + file_child
        if os.path.isdir(dir_1):
            if file_child == 'build':
                println(dir_1)
                shutil.rmtree(dir_1)  # remove the non-empty directory
            else:
                get_files(dir_1, delete_file)
def test(): """List info of all currently running processes emulating a ps -aux output. @return L{twisted.internet.defer.Deferred} """ import datetime import time import os today_day = datetime.date.today() @defer.inlineCallbacks def get_process_info(pid): proc = Process(pid) user = proc.username if os.name == 'nt' and '\\' in user: user = user.split('\\')[1] pid = proc.pid data = yield proc.get_cpu_percent(interval=None) cpu = round(data, 1) data = yield proc.get_memory_percent() mem = round(data, 1) data = yield proc.get_memory_info() rss, vsz = [x / 1024 for x in data] # If process has been created today print H:M, else MonthDay start = datetime.datetime.fromtimestamp(proc.create_time) if start.date() == today_day: start = start.strftime("%H:%M") else: start = start.strftime("%b%d") data = yield proc.get_cpu_times() cputime = time.strftime("%M:%S", time.localtime(sum(data))) cmd = ' '.join(proc.cmdline) # where cmdline is not available UNIX shows process name between # [] parentheses if not cmd: cmd = "[%s]" % proc.name defer.returnValue("%-9s %-5s %-4s %4s %7s %7s %5s %8s %s" \ % (user, pid, cpu, mem, vsz, rss, start, cputime, cmd)) util.println("%-9s %-5s %-4s %4s %7s %7s %5s %7s %s" \ % ("USER", "PID", "%CPU", "%MEM", "VSZ", "RSS", "START", "TIME", "COMMAND")) pids = yield get_pid_list() pids.sort() for pid in pids: try: line = yield get_process_info(pid) except (AccessDenied, NoSuchProcess): pass else: util.println(line)
def xmlrpc_AddCard(self, request, code, balance):
    """ Add card and return true """
    println('Call AddCard ' + repr(request.getClientIP()) + ' card ' + repr(code))
    try:
        yield self.cardscollection.insert({"code": code, "balance": balance})
        res = True
    except Exception:
        res = False
    defer.returnValue(res)
def xmlrpc_DecBalance(self, request, code, dec_value):
    """ Decrement balance by card code """
    println('Call DecBalance ' + repr(request.getClientIP()) + ' card ' + repr(code) + ' balance ' + repr(dec_value))
    rec = yield self.cardscollection.find_one_and_update(
        {"code": code},
        {"$inc": {"balance": -dec_value}},
        return_document=ReturnDocument.AFTER
    )
    defer.returnValue(rec['balance'])
def add_new_projects():
    with open('resources.txt', 'r') as resources_file:
        resources = resources_file.readlines()
    resources_dict = []
    for line in resources:
        cleaned_project = {}
        resource_url = line.split(' ')
        if len(resource_url) > 0 and isinstance(resource_url, list):
            resource_url = resource_url[0]
            print("PARSED URL " + resource_url)
            if 'https' in resource_url:
                try:
                    metadata_from_url = get_metadata(identifier=resource_url, format='n3')
                    cleaned_project['description'] = metadata_from_url['description']
                    cleaned_project['title'] = metadata_from_url['title']
                    cleaned_project['homepage'] = resource_url
                    cleaned_project['category'] = ''
                    resource_description = ''
                    if cleaned_project['description'] is not None:
                        resource_description = cleaned_project['description']
                    if 'roku' in resource_description:
                        cleaned_project['category'] = 'roku'
                    if 'player' in resource_description:
                        cleaned_project['category'] = 'players'
                    if 'encoding' in resource_description:
                        cleaned_project['category'] = 'encoding'
                    if 'ffmpeg' in resource_description:
                        cleaned_project['category'] = 'ffmpeg'
                    if 'hls' in resource_description:
                        cleaned_project['category'] = 'hls'
                except Exception:
                    print('failed parsing metadata for resource url')
            else:
                print("NEED TO FIX HTTPS " + resource_url)
        resources_dict.append(cleaned_project)
    with open('resources.json', 'w') as outfile:
        json.dump(resources_dict, outfile, sort_keys=True, indent=4, ensure_ascii=False)
    println("wrote resources.json to file")
    return resources_dict
def test(): """List info of all currently running processes emulating a ps -aux output. @return L{twisted.internet.defer.Deferred} """ import datetime import time import os today_day = datetime.date.today() @defer.inlineCallbacks def get_process_info(pid): proc = Process(pid) user = proc.username if os.name == 'nt' and '\\' in user: user = user.split('\\')[1] pid = proc.pid data = yield proc.get_cpu_percent(interval=None) cpu = round(data, 1) data = yield proc.get_memory_percent() mem = round(data, 1) data = yield proc.get_memory_info() rss, vsz = [x / 1024 for x in data] # If process has been created today print H:M, else MonthDay start = datetime.datetime.fromtimestamp(proc.create_time) if start.date() == today_day: start = start.strftime("%H:%M") else: start = start.strftime("%b%d") data = yield proc.get_cpu_times() cputime = time.strftime("%M:%S", time.localtime(sum(data))) cmd = ' '.join(proc.cmdline) # where cmdline is not available UNIX shows process name between # [] parentheses if not cmd: cmd = "[%s]" % proc.name defer.returnValue("%-9s %-5s %-4s %4s %7s %7s %5s %8s %s" \ % (user, pid, cpu, mem, vsz, rss, start, cputime, cmd)) util.println("%-9s %-5s %-4s %4s %7s %7s %5s %7s %s" \ % ("USER", "PID", "%CPU", "%MEM", "VSZ", "RSS", "START", "TIME", "COMMAND")) pids = yield get_pid_list() pids.sort() for pid in pids: result = yield get_process_info(pid) util.println(result)
def calculatePercentage(self):
    numbers = self.categorizeAllLines()
    allChanged = numbers[0]
    allRemoved = numbers[1]
    allAdded = numbers[2]
    overall = "All change: " + allChanged[0] + " " + allRemoved[0] + " " + allAdded[0]
    overallChange = int(allChanged[0]) + int(allRemoved[0]) + int(allAdded[0])
    println(overall)
    refactoring = [self.addFromIndexOne(allChanged),
                   self.addFromIndexOne(allRemoved),
                   self.addFromIndexOne(allAdded)]
    refactoringInfo = ('Refactoring change: ' + str(refactoring[0]) + ' '
                       + str(refactoring[1]) + ' ' + str(refactoring[2]))
    refactoringChange = refactoring[0] + refactoring[1] + refactoring[2]
    println(refactoringInfo)
    self.percentage = float(refactoringChange) / overallChange
def extract_context(html, url):
    soup = BeautifulSoup(html)
    # Insert into Content (under this domain)
    texts = soup.findAll(text=True)
    try:
        Content.objects.create(
            url=url,
            title=soup.title.string,
            summary=helpers.strip_tags(" \n".join(filter(visible, texts)))[:4000],
            last_crawled_at=datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
        )
    except IntegrityError:
        println('%s - already existed in Content' % url)
    soup.prettify()
    return [str(anchor['href'])
            for anchor in soup.findAll('a', attrs={'href': re.compile("^http://")})
            if anchor['href']]
def render_GET(self, request):
    request.write("hello")
    d = request.notifyFinish()
    d.addCallback(lambda _: println('finished normally'))
    d.addErrback(println, "error")
    reactor.callLater(10, request.finish)
    return server.NOT_DONE_YET
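# One way such a resource could be served; a sketch only, where HelloResource
# stands in for whatever class defines the render_GET above (the name is
# illustrative, not from the original code):
from twisted.internet import reactor
from twisted.web import server

site = server.Site(HelloResource())
reactor.listenTCP(8080, site)  # serve on port 8080
reactor.run()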
def useCursor(cur):
    # execute a query
    d = cur.execute(query)
    # fetch the first row from the result
    d.addCallback(lambda _: cur.fetchone())
    # output it
    d.addCallback(lambda result: util.println('First table name:', result[0]))
    # and close the cursor
    return d.addCallback(lambda _: cur.close())
def do(self):
    manager.d.addCallback(lambda _: manager.conn.runQuery(
        'select tablename from pg_tables'))
    manager.d.addCallback(lambda result: util.println(
        'All tables:', result))
    manager.d.addCallback(lambda _: manager.conn.close())
    manager.d.addErrback(log.err)
    manager.d.addBoth(lambda _: reactor.stop())
def union(p, q):
    for url in p:
        parsed = urlparse(str(url))
        if parsed.netloc and parsed.netloc != "www.webhostingtalk.com":
            url = "http://%s/" % parsed.netloc
        if parsed.netloc and url not in q:
            print url
            if parsed.netloc != "www.webhostingtalk.com":
                # Insert into Site
                try:
                    Website.objects.create(
                        url=url,
                        name=parsed.netloc,
                        last_crawled_at=datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
                    )
                except IntegrityError:
                    println("%s - already existed in Site" % url)
            else:
                # We want to deep crawl webhosting talk
                q.append(url)
def send(self):
    url = "".join(("http://", self.req['Kannel-host'],
                   ":", self.req['Kannel-port'],
                   "/cgi-bin/sendsms?smsc=", self.req['smsc'],
                   "&account=", self.req['account'],
                   "&limit=", self.req['limit'],
                   "&udh=", urllib2.quote(self.req['udh']),
                   "&from=", urllib2.quote(self.req['from']),
                   "&to=", self.req['to'],
                   "&text=", urllib.quote(self.req['text']),
                   "&coding=", self.req['coding'],
                   "&dlr-mask=", self.req['dlr-mask'],
                   "&dlr-url=", urllib2.quote(self.req['dlr-url'])))
    getPage(url).addCallbacks(
        callback=lambda value: println(value),
        errback=lambda error: println("an error occurred : ", url))
    COUNTER(self.req['smsc'], self.req['text'], self.req['coding'])
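# A sketch of building the same Kannel sendsms URL with urllib.urlencode
# (Python 2), which escapes every parameter instead of quoting a few by hand;
# the helper name is illustrative and req stands in for the self.req dict
# used above:
import urllib


def build_sendsms_url(req):
    params = [
        ('smsc', req['smsc']),
        ('account', req['account']),
        ('limit', req['limit']),
        ('udh', req['udh']),
        ('from', req['from']),
        ('to', req['to']),
        ('text', req['text']),
        ('coding', req['coding']),
        ('dlr-mask', req['dlr-mask']),
        ('dlr-url', req['dlr-url']),
    ]
    return "http://%s:%s/cgi-bin/sendsms?%s" % (
        req['Kannel-host'], req['Kannel-port'], urllib.urlencode(params))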
def main(argv=None):  # IGNORE:C0111
    '''Command line options.'''
    if argv is None:
        argv = sys.argv
    else:
        sys.argv.extend(argv)

    for a in argv:
        println(a)

    if len(argv) <= 1:
        println("No arguments, exiting")
    elif argv[1] == "challenge6":
        c = Challenge6.Challenge6()
        c.challenge(argv[2:])
    elif argv[1] == "challenge8":
        c = Challenge8.Challenge8()
        c.challenge(argv[2:])
    else:
        println("I don't recognize your argument %s" % argv[1])
def tasks_completed(self):
    println('Completed! You need to restart your computer.')
    reactor.stop()
print("文件夹 :" + res[26:] + " === 文件:" + res + '/' + f) # folder_name = res[26:] # file_path = res + '/' + f # os.rename(res + '/' + f, res + '/' + folder_name+'.exe') shutil.move(res + '/' + f, path_p) if __name__ == '__main__': path = '/Users/binny/study/PS/CS6' # file_list = get_file_list(path) # dir_list = clean_list(file_list) # print(len(file_list)) # rename_dir(dir_list) # rename_file(path, dir_list) # print(file_list) # for f in file_list: # if f.find("exe") == -1: # print(path + '/' + f) # # os.remove(path + '/' + f) sums = 300 check = 1.003 checks = 0 for i in range(1, 241): sums = sums * 1.003 + 300 checks = checks + math.pow(check, i) println(i) println(checks * 300) println(sums)
def print_error(error):
    println("Error:", error)
    reactor.stop()
try:
    num_messages = int(sys.argv[4])
except IndexError:
    num_messages = 1

# build the parameters for the request
params = {}
params['message'] = message

requests = []
# send the requests and print the result
for msg_count in range(num_messages):
    requestor = make_requestor(SERVER_HOST, SERVER_PORT, MAIL_PROTOCOL)
    d = requestor.request('send', params)
    d.addCallback(lambda result: println("Result: " + result))
    requests.append(d)
results = defer.gatherResults(requests)


def replay_cb(result):
    print("Replay Result: " + result)
    reactor.stop()


def replay(_):
    # try out a replay message
    requestor = make_requestor(SERVER_HOST, SERVER_PORT, MAIL_PROTOCOL)
    d = requestor.request('replay', dict())
    d.addCallback(replay_cb)


results.addCallback(replay)
reactor.run()
from txpostgres import txpostgres

from twisted.internet import reactor
from twisted.python import log, util

# connect to the database
conn = txpostgres.Connection()
d = conn.connect('dbname=postgres')

# run the query and print the result
d.addCallback(lambda _: conn.runQuery('select tablename from pg_tables'))
d.addCallback(lambda result: util.println('All tables:', result))

# close the connection, log any errors and stop the reactor
d.addCallback(lambda _: conn.close())
d.addErrback(log.err)
d.addBoth(lambda _: reactor.stop())

# start the reactor to kick off connection establishing
reactor.run()
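# The same flow written with inlineCallbacks instead of an explicit callback
# chain; a sketch that only reuses the calls shown above (conn.connect,
# conn.runQuery, conn.close):
from twisted.internet import defer, reactor
from twisted.python import log, util
from txpostgres import txpostgres


@defer.inlineCallbacks
def list_tables():
    conn = txpostgres.Connection()
    yield conn.connect('dbname=postgres')
    try:
        result = yield conn.runQuery('select tablename from pg_tables')
        util.println('All tables:', result)
    finally:
        conn.close()


list_tables().addErrback(log.err).addBoth(lambda _: reactor.stop())
reactor.run()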
def apptest(commandID, *args, **kw):
    print "apptest"
    d = remote.callRemote(commandID, *args, **kw)
    print "call remote"
    d.addCallback(lambda a: util.println(a))
    return d
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
This example demonstrates how to use downloadPage.

Usage:
    $ python dlpage.py <url>

Don't forget the http:// when you type a URL!
"""

from twisted.internet import reactor
from twisted.web.client import downloadPage
from twisted.python.util import println
import sys

# The function downloads a page and saves it to a file, in this case, it saves
# the page to "foo".
downloadPage(sys.argv[1], "foo").addCallbacks(
    lambda value: reactor.stop(),
    lambda error: (println("an error occurred", error), reactor.stop()))

reactor.run()
def get(self):
    c = self.conn.cursor()
    d = c.execute('select * from urls')
    d.addCallback(lambda _: c.fetchone())
    d.addCallback(lambda result: util.println('%s' % result))
    return d.addCallback(lambda _: c.close())
def disp(self):
    println(self.id)
    println(self.name)
    println(self.description)
    println(self.meal_category)
    println(self.meal_parent_category)
from twisted.web.google import checkGoogle
from twisted.python.util import println
from twisted.internet import reactor
import sys

checkGoogle(sys.argv[1:]).addCallbacks(
    lambda l: (println(l), reactor.stop()),
    lambda e: (println('error:', e), reactor.stop()))

reactor.run()
#!/usr/local/bin/python

import sys

from twisted.internet import reactor
from twisted.web.client import getPage
from twisted.python.util import println

getPage(sys.argv[1]).addCallbacks(
    callback=lambda value: (println(value), reactor.stop()),
    errback=lambda error: (println("an error occurred", error), reactor.stop()))

reactor.run()
def println_stop_reactor(e):
    println(e)
    reactor.stop()
def apptest(commandID, *args, **kw):
    d = remote.callRemote(commandID, *args, **kw)
    d.addCallback(lambda a: util.println(a))
    return d
self.percentage = float(refactoringChange) / overallChange


def addFromIndexOne(self, list):
    count = 0
    for i in range(1, len(list) - 1):
        count += int(list[i])
    return count


def isStart(line):
    return line.find("=========") != -1


fname = "/home/xige/Desktop/StudyInfo.log"
with open(fname) as f:
    content = f.readlines()

allRevisions = list()
index = 0
current = Revision()
for line in content:
    if isStart(line):
        if current.hasContent():
            current.calculatePercentage()
            allRevisions.append(current)
        current = Revision()
        continue
    current.addLine(line)

allPerc = 0.0
for revision in allRevisions:
    allPerc += revision.percentage
println("Results:" + str(allPerc / len(allRevisions)))
#!/usr/bin/python
__author__ = 'kilroy'
# (c) 2014, WasHere Consulting, Inc.
# Written for Infinite Skills

from twisted.internet import reactor
from twisted.web.client import getPage
from twisted.python.util import println

getPage("http://www.microsoft.com/").addCallbacks(
    callback=lambda value: (println(value), reactor.stop()),
    errback=lambda error: (println("an error occurred", error), reactor.stop()))

reactor.run()
import psycopg2
import psycopg2.extras

from txpostgres import txpostgres

from twisted.internet import reactor
from twisted.python import log, util


def dict_connect(*args, **kwargs):
    kwargs['connection_factory'] = psycopg2.extras.DictConnection
    return psycopg2.connect(*args, **kwargs)


class DictConnection(txpostgres.Connection):
    connectionFactory = staticmethod(dict_connect)


# connect using the custom connection class
conn = DictConnection()
d = conn.connect('dbname=postgres')

# run a query and print the result
d.addCallback(lambda _: conn.runQuery('select * from pg_tablespace'))
# access the column by its name
d.addCallback(lambda result: util.println('All tablespace names:',
                                          [row['spcname'] for row in result]))

# close the connection, log any errors and stop the reactor
d.addCallback(lambda _: conn.close())
d.addErrback(log.err)
d.addBoth(lambda _: reactor.stop())

# start the reactor to kick off connection establishing
reactor.run()
def connectionLost(self, reason):
    util.println('client disconnected')
    self.observer.removeHandler(self)
def print_page(data):
    println(data)
    reactor.stop()
    urls = spider.give_all_jobs()
    # draw a point
    sys.stdout.write('.')
    sys.stdout.flush()
    for url in urls:
        next_d = get_page(str(url), enc=encoding, timeout=timeout)
        next_d.addCallback(page_recvd)
        next_d.addErrback(on_err)

    # initial crawl
    d = get_page(start_page, enc=encoding, timeout=timeout, must_succ=True)
    d.addCallback(page_recvd)
    d.addErrback(on_err)

    return pages_d  # defer that will callback with pages


if __name__ == '__main__':
    from policyrunner import make_parse_engines

    crawl_pages(
        start_page='http://www.groupon.cn',
        url_matcher=r'.',
        encoding='utf-8',
        timeout=10,
        max_num=10,
        parser=make_parse_engines()['pyquery']
    ).addCallback(lambda v: (util.println(v), reactor.stop()))
    reactor.run()
def apptest(commandId, *args, **kwargs):
    d = remote.call_remote(commandId, *args, **kwargs)
    d.addCallback(lambda a: util.println(a))
    return d