def load():
    """Copy *source* to *destdir* on every host listed in the host file.

    Reads sys.argv[2] (host file path), sys.argv[3] (source path) and
    sys.argv[4] (destination directory), then spawns one TRANS worker
    process per host line.
    """
    queue = Queue()
    # 'with' guarantees the host file is closed (the original leaked the handle)
    with open(sys.argv[2], 'r') as hostfile:
        host_lines = hostfile.readlines()
    source = sys.argv[3]
    destdir = sys.argv[4]
    for line in host_lines:
        host = line.split()
        queue.put(host)
        # NOTE(review): join() immediately after start() makes the transfers
        # run one at a time; preserved as the original behaviour.
        worker = Process(target=TRANS, args=(queue.get(), source, destdir))
        worker.start()
        worker.join()
def exe():
    """Run every command in the command file on every host in the host file.

    Reads sys.argv[2] (host file path) and sys.argv[3] (command file path),
    then spawns one SSH worker process per (command, host) pair.
    """
    queue = Queue()
    # 'with' guarantees both files are closed (the original leaked both handles)
    with open(sys.argv[2], 'r') as f:
        host_lines = f.readlines()
    with open(sys.argv[3], 'r') as f:
        command_lines = f.readlines()
    for command_line in command_lines:
        # strip embedded newlines (equivalent to the original split/join dance)
        command = ''.join(command_line.split('\n'))
        for line in host_lines:
            host = line.split()
            queue.put(host)
            # NOTE(review): start()/join() per iteration keeps execution
            # sequential; preserved as the original behaviour.
            worker = Process(target=SSH, args=(queue.get(), str(command)))
            worker.start()
            worker.join()
def cmdrun(self, cmd): comScanCmd = cmd queue = Queue() scanProc = Process( target=self.newProcExecuteCmd, args=[queue, comScanCmd]) scanProc.start() # 等待5秒 scanProc.join(10) try: scanResult = queue.get(timeout=5) except Exception as e: print "get cmd result error" scanResult = -1 scanProc.terminate() return scanResult
def cmdrun(self, cmd): try: comScanCmd = cmd queue = Queue() scanProc = Process(target=self.newProcExecuteCmd, args=[queue, comScanCmd]) scanProc.start() scanProc.join(5) try: scanResult = queue.get(timeout=30) #print scanResult except Exception, e: print e print "get cmd result error: %s " % str(e) scanResult = -1 scanProc.terminate() return scanResult
def test_possible_to_share_sqlalchemy_objects_across_process_boundaries_with_memcache(self):
    """Verify that an object stored through a ChainedStore (memcache +
    SQLAlchemy) inside a child process can be fetched back in the parent
    process with all attributes intact."""

    # write to the memcache in another process
    def write_to_cache():
        # create storage for subprocess
        mc = MemcacheStore(servers=[_MEMCACHED_HOST])
        sa = SqlAlchemyStore(uri="sqlite:///%s" % _SQLITE_FILENAME)
        ch = ChainedStore([mc, sa])
        # store a MultiprocBazbot object for retrieval in the main process
        bb = MultiprocBazbot(1, 2, 999)
        ch.put(bb)

    p = Process(target=write_to_cache)
    p.start()
    p.join()

    # create storage in the main process
    mc = MemcacheStore(servers=[_MEMCACHED_HOST])
    sa = SqlAlchemyStore(uri="sqlite:///%s" % _SQLITE_FILENAME)
    ch = ChainedStore([mc, sa])

    # FIXME: these unit tests should be valid, but for some reason running
    # the Shove unit tests beforehand interacts with this
    # # accessing the MemcacheStore directly will not work
    # # since the ChainedStore is the only thing that can
    # # conceptually link it to the SqlAlchemyStore
    # bb_mc = mc.get(MultiprocBazbot, {"zap": 999})
    # self.assert_(isinstance(bb_mc, MultiprocBazbot))
    # self.assertRaises(Exception, lambda: bb_mc.zap)
    # self.assertRaises(Exception, lambda: bb_mc.foo)
    # self.assertRaises(Exception, lambda: bb_mc.bar)
    # self.assertRaises(Exception, lambda: bb_mc.bleh)

    # accessing the ChainedStore will work since it allows
    # the SqlAlchemyStore to execute its merge callbacks
    bb = ch.get(MultiprocBazbot, {"zap": 999})
    self.assert_(bb)
    self.assert_(isinstance(bb, MultiprocBazbot))
    self.assertEqual(bb.zap, 999)
    self.assertEqual(bb.foo, 1)
    self.assertEqual(bb.bar, 2)
    self.assertEqual(bb.bleh, "bleh")
def run_code():
    """Flask endpoint: execute user-submitted Python code in a child
    process with a 2-second wall-clock limit, returning any traceback
    (or a timeout message) as JSON.

    SECURITY NOTE(review): this executes untrusted input via exec(); the
    "__class__"/"_module" substring blacklist below is trivially
    bypassable and must not be relied on as a sandbox.
    """
    if not request.args:
        abort(400)
    pycode = request.args.get('code', '')
    # crude sandbox-escape blacklist; reuses the timeout message as the reply
    if ("__class__" in pycode) or ("_module" in pycode):
        return jsonify("timed out! you have an infinite loop!")
    pysplit = pycode.splitlines()
    # print(pycode, file=sys.stderr)
    # run the code in a separate process so it can be killed on timeout
    p = Process(target=exec, args=(pycode, myglobals))
    p.start()
    p.join(2)  # allow the user code at most 2 seconds
    p.terminate()
    # NOTE(review): `p.exception` is not a standard multiprocessing.Process
    # attribute -- presumably Process here is a project subclass that
    # captures child exceptions; confirm against its definition.
    if p.exception:
        if p.exception == 1:
            return jsonify("no error!")
        tb = p.exception[1]
        if isinstance(p.exception[0], SyntaxError):
            return getSynTraceback(filename, pysplit, tb)
        return getTraceback(filename, pysplit, tb)
    # no exception info recorded -> assume the child was killed by the timeout
    return jsonify("timed out! you have an infinite loop!")
# NOTE(review): the two statements below appear to be the tail of a worker
# function whose `def` line falls outside this chunk; kept as found.
print "Process Number: %s didn't find a response for %s " % (i, ip)
pass

def snmp_query(i, out):
    """Worker: pull one IP from *out*, SNMP-query it and return a HostRecord.

    Exits the process when the queue is empty; sleeps briefly first to
    give the producer a head start.
    """
    while True:
        time.sleep(.1)
        if out.empty():
            sys.exit()
        print "Process Number: %s" % i
        ipaddr = out.get()
        s = Snmp()
        h = HostRecord()
        h.ip = ipaddr
        # NOTE(review): s.query() is called without arguments -- presumably
        # Snmp carries its own target configuration; confirm.
        h.snmp_response = s.query()
        print h
        return h

try:
    q.putmany(ips)
finally:
    # spawn the ping workers, then the SNMP workers
    for i in range(num_workers):
        p = Process(target=f, args=[i, q, oq])
        p.start()
    for i in range(num_workers):
        pp = Process(target=snmp_query, args=[i, oq])
        pp.start()
print "main process joins on queue"
# NOTE(review): only the last-started ping worker is joined here
p.join()
print "Main Program finished"
# the /24 subnet to sweep
ips = IP("10.0.1.0/24")

def f(i, q):
    """Worker: pull IPs from *q* and ping each once until the queue is empty."""
    while True:
        if q.empty():
            sys.exit()
        print "Process Number: %s" % i
        ip = q.get()
        # one ping, output discarded; ret == 0 means the host answered
        ret = subprocess.call("ping -c 1 %s" % ip,
                              shell=True,
                              stdout=open('/dev/null', 'w'),
                              stderr=subprocess.STDOUT)
        if ret == 0:
            print "%s: is alive" % ip
        else:
            print "Process Number: %s didn't find a response for %s" % (i, ip)

# NOTE(review): `q` is not created in this chunk -- presumably a Queue
# constructed earlier in the file; confirm.
for ip in ips:
    q.put(ip)
#q.put("192.168.1.1")
for i in range(50):
    p = Process(target=f, args=[i, q])
    p.start()
print "main process joins on queue"
# NOTE(review): this joins only the last-started worker, not all 50
p.join()
print "Main Program finished"
#/usr/bin/env python # -*- coding: utf-8 -*- from processing import Process, Queue import time def f(q): x = q.get() print "Process number %s, sleeps for %s seconds" %(x, x) time.sleep(x) print "Process number %s finished" % x q = Queue() for i in range(10): q.put(i) i = Process(target=f, args=[q] i.start() print "main process joins on queue" i.join() print "Main Program finished"
# slice the bid range into PLATFORMDIVISION [start, end) windows,
# spreading the remainder over the first few slices
for i in range(PLATFORMDIVISION):
    start_index.append(start)
    end = start + bids_number_pertime
    if bids_number_reminder > 0:
        end += 1
        bids_number_reminder -= 1
    end_index.append(end)
    start = end
cur_db_list = getCursors(conn_db, n=PLATFORMDIVISION)
print cur_db_list
# whitelist of platforms allowed to repeat
platform_id_white_list = getPlatformIdList("repeatwhiteplatform_id.txt")
for i in range(PLATFORMDIVISION):
    j = Process(target=readSQLReturn,
                args=[[start_index[i], end_index[i], cur_db_list[i]]])
    j.start()
    # NOTE(review): join() inside the loop runs the workers one at a time
    j.join()
print "共有" + str(bids_number) + "个标."
stringSQL = "SELECT count(*) FROM " + dstdb_info
cur_db.execute(stringSQL)
good_number = cur_db.fetchone()[0]
print "在满额限制为" + str(FULLBIDPERCENT) + "的情况下, 只有" + "%.2f" % (100 * (float(good_number) / bids_number)) + "%的数据是可用的."
#changeValue("./clean_date.xml","clean_date_lasttime",clean_date_thistime)
fp.close()
# dump every platform_id to a file, one per line
fp = open("platform_id_list_info.txt", "w")
for platform_id in platform_id_set:
    fp.write(str(platform_id))
    fp.write("\n")
fp.close()
closeCursors(cur_db)
closeConns(conn_db)
def algo(request, algo):
    """Django view for a steganography algorithm page.

    GET renders empty embed/extract forms; POST runs the selected embed
    ("submit1") or extract ("submit2") handler in a forked worker process,
    communicating via a Queue with a 10-second result timeout.
    """
    text = ""
    type = ""
    algo_object = get_object_or_404(Algo, shortTitle=algo)
    manual = get_object_or_404(ManPage, algo=algo_object)
    # per-algorithm form classes and output image type
    embedFormDict = {
        'cpt' : CPTEmbedForm,
        'f5' : F5EmbedForm,
        'lsb' : LsbEmbedForm,
        'gifshuffle' : GifShuffleEmbedForm,
        'bs' : BattlestegEmbedForm,
    }
    extractFormDict = {
        'cpt' : CPTExtractForm,
        'f5' : F5ExtractForm,
        'lsb' : LsbExtractForm,
        'gifshuffle' : GifShuffleExtractForm,
        'bs' : BattlestegExtractForm,
    }
    typeDict = {
        'cpt' : "png",
        'f5' : "jpeg",
        'lsb' : "png",
        'gifshuffle' : "gif",
        'bs' : "png",
    }
    if request.method == 'POST':
        q = Queue()
        # embedding
        if "submit1" in request.POST:
            algoDict = {
                'cpt' : cptEmbed,
                'f5' : f5Embed,
                'lsb' : lsbEmbed,
                'gifshuffle' : gifShuffleEmbed,
                'bs' : bsEmbed,
            }
            embedForm = embedFormDict[algo](request.POST, request.FILES)
            extractForm = extractFormDict[algo]()
            type = typeDict[algo]
            p = Process(target=algoDict[algo], args=(q, ))
            # fork process to embed
            if embedForm.is_valid():
                p.start()
                # worker reads [POST data, uploaded file path] from the queue
                q.put([request.POST, request.FILES['file'].temporary_file_path()])
                # NOTE(review): shelling out just to sleep -- time.sleep(1)
                # would do the same without a subprocess
                os.system("sleep 1")
                try:
                    retval = q.get(True, 10)
                except Q.Empty:
                    # NOTE(review): `Q` is presumably the Queue module aliased
                    # at import time; confirm against the file's imports
                    retval = -2
                p.join()
                if retval == -1:
                    text += "%s-Datei nicht gefunden oder fehlerhaft."%(type)
                elif retval == -2:
                    text += "Fehler beim Einbetten. Anderes Bild oder andere Parameter versuchen."
                else:
                    # success: stream the stego image back to the client
                    return createResponse(retval, type)
        # extracting
        elif "submit2" in request.POST:
            algoDict = {
                'cpt' : cptExtract,
                'f5' : f5Extract,
                'lsb' : lsbExtract,
                'gifshuffle' : gifShuffleExtract,
                'bs' : bsExtract,
            }
            embedForm = embedFormDict[algo]()
            extractForm = extractFormDict[algo](request.POST, request.FILES)
            type = typeDict[algo]
            p = Process(target=algoDict[algo], args=(q, ))
            # fork process to extract
            if extractForm.is_valid():
                p.start()
                q.put([request.POST, request.FILES['file'].temporary_file_path()])
                try:
                    retval = q.get(True, 10)
                except Q.Empty:
                    retval = -2
                p.join()
                if retval == -1:
                    text += "%s-Datei nicht gefunden oder fehlerhaft."%(type)
                elif retval == -2:
                    text += "Fehler beim Ausbetten. Anderes Bild oder andere Parameter versuchen."
                else:
                    #print retval
                    # success: show the extracted payload on the page
                    text += retval
    # empty form
    else:
        embedForm = embedFormDict[algo]()
        extractForm = extractFormDict[algo]()
    # render
    return render_to_response("stego_algo.html", {'algo' : algo_object,
                                                  'embedForm' : embedForm,
                                                  'extractForm' : extractForm,
                                                  'text' : text,
                                                  'algo_type' : 'Staganographie',
                                                  'manual' : manual,})
task_id = 0 for i in range(len(strategy_args)): args = strategy_args[i] for day in days: task = {'strategy_id': i, 'strategy_args': args, 'day': day.timetuple()[:3], 'ticker': ticker_details, 'task_id': task_id} tasks[task_id] = task task_id += 1 # put the tasks in the queue queue.putmany(tasks.values()) print "%s: start analyzing %s tasks" % (str(datetime.now()), len(tasks)) for i in range(len(tasks)): result_dict = result.get() for task_id, report in result_dict.items(): tsk = tasks.get(task_id) tsk.update({'report': report}) print "analyzed task: %s" % task_id p1.stop(); p2.stop() p1.join(); p2.join() # store the pickled tasks in a file for later analysis fname = "../simulations/first_run_%s" % start_time.strftime("%Y%m%d%H%M%S") f = open(fname, 'w') pickle.dump(tasks, f, pickle.HIGHEST_PROTOCOL) f.close() print "output written to: %s" % fname
# NOTE(review): the statements down to "start = end" appear to be the body
# of a "for i in range(PLATFORMDIVISION):" loop whose header lies outside
# this chunk; kept as found.
start_index.append(start)
end = start + bids_number_pertime
if bids_number_reminder > 0:
    # spread the remainder over the first few slices
    end += 1
    bids_number_reminder -= 1
end_index.append(end)
start = end

cur_db_list = getCursors(conn_db, n=PLATFORMDIVISION)
print cur_db_list
# whitelist of platforms allowed to repeat
platform_id_white_list = getPlatformIdList("repeatwhiteplatform_id.txt")
for i in range(PLATFORMDIVISION):
    j = Process(target=readSQLReturn,
                args=[[start_index[i], end_index[i], cur_db_list[i]]])
    j.start()
    # NOTE(review): join() inside the loop runs the workers one at a time
    j.join()
print "共有" + str(bids_number) + "个标."
stringSQL = "SELECT count(*) FROM " + dstdb_info
cur_db.execute(stringSQL)
good_number = cur_db.fetchone()[0]
print "在满额限制为" + str(FULLBIDPERCENT) + "的情况下, 只有" + "%.2f" % (
    100 * (float(good_number) / bids_number)) + "%的数据是可用的."
#changeValue("./clean_date.xml","clean_date_lasttime",clean_date_thistime)
fp.close()
# dump every platform_id to a file, one per line
fp = open("platform_id_list_info.txt", "w")
for platform_id in platform_id_set:
    fp.write(str(platform_id))
    fp.write("\n")
fp.close()
closeCursors(cur_db)