def cleanupCalamari():
    try:
        zypperutils.removePkg('calamari-server-test calamari-clients calamari-server',
                              os.environ["CALAMARI_NODE"])
    except Exception as e:
        log.warning("Error while removing the calamari packages " + str(sys.exc_info()[0]))
    cmd = 'ssh %s sudo rcpostgresql stop' % (os.environ["CALAMARI_NODE"])
    rc, stdout, stderr = launch(cmd=cmd)
    if rc != 0:
        log.warning("Error while executing the command '%s'. Error message: '%s'" % (cmd, stderr))
    cmd = 'ssh %s sudo rm -rf /var/lib/pgsql/data' % (os.environ["CALAMARI_NODE"])
    rc, stdout, stderr = launch(cmd=cmd)
    if rc != 0:
        log.warning("Error while executing the command '%s'. Error message: '%s'" % (cmd, stderr))
    cmd = 'ssh %s sudo rm -rf /usr/lib/python2.7/site-packages/calamari-server-test/' % (os.environ["CALAMARI_NODE"])
    rc, stdout, stderr = launch(cmd=cmd)
    if rc != 0:
        log.warning("Error while executing the command '%s'. Error message: '%s'" % (cmd, stderr))
    cmd = 'ssh %s sudo rm expect /tmp/calamari_cluster.yaml /tmp/test.conf' % (os.environ["CALAMARI_NODE"])
    rc, stdout, stderr = launch(cmd=cmd)
    if rc != 0:
        log.warning("Error while executing the command '%s'. Error message: '%s'" % (cmd, stderr))
def createS3TestsUsers(rgw_node, rgw_name):
    config = SafeConfigParser()
    config.read('s3tests/s3-tests.conf')
    s3main_list = config.items('s3 main')[3:]
    s3main_data = {}
    s3main_data['client_key'] = "client.radosgw.%s" % (rgw_name)
    for i in range(len(s3main_list)):
        s3main_data[s3main_list[i][0]] = s3main_list[i][1]
    s3main_usercreate_cmd = ('sudo radosgw-admin -n {client_key} user create --uid={user_id} '
                             '--display-name=\"{display_name}\" --email={email} '
                             '--access_key={access_key} --secret={secret_key} '
                             '--key-type s3').format(**s3main_data)
    cmd = "ssh %s %s" % (rgw_node, s3main_usercreate_cmd)
    rc, stdout, stderr = launch(cmd=cmd)
    if rc != 0:
        raise Exception("Error while creating the s3 main user. Error message: '%s'" % (stderr))
    s3alt_list = config.items('s3 alt')[3:]
    s3alt_data = {}
    s3alt_data['client_key'] = "client.radosgw.%s" % (rgw_name)
    for i in range(len(s3alt_list)):
        s3alt_data[s3alt_list[i][0]] = s3alt_list[i][1]
    s3alt_usercreate_cmd = ('sudo radosgw-admin -n {client_key} user create --uid={user_id} '
                            '--display-name=\"{display_name}\" --email={email} '
                            '--access_key={access_key} --secret={secret_key} '
                            '--key-type s3').format(**s3alt_data)
    cmd = "ssh %s %s" % (rgw_node, s3alt_usercreate_cmd)
    rc, stdout, stderr = launch(cmd=cmd)
    if rc != 0:
        raise Exception("Error while creating the s3 alt user. Error message: '%s'" % (stderr))
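# Hedged sketch, not taken from this repository: the .format(**s3main_data) calls above
# only need the keys user_id, display_name, email, access_key and secret_key from each
# section, and the [3:] slice presumably drops the entries that SafeConfigParser merges
# in from [DEFAULT] (host, port, is_secure in the stock s3-tests sample config).
# s3tests/s3-tests.conf is therefore assumed to look roughly like this:
#
#   [DEFAULT]
#   host = localhost
#   port = 7480
#   is_secure = no
#
#   [s3 main]
#   user_id = s3main
#   display_name = S3 Main
#   email = s3main@example.com
#   access_key = EXAMPLEMAINACCESSKEY
#   secret_key = examplemainsecretkey
#
#   [s3 alt]
#   user_id = s3alt
#   display_name = S3 Alt
#   email = s3alt@example.com
#   access_key = EXAMPLEALTACCESSKEY
#   secret_key = examplealtsecretkey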
def initializeCalamari():
    cmd = "ssh %s sudo systemctl restart apache2.service" % (os.environ["CALAMARI_NODE"])
    rc, stdout, stderr = launch(cmd=cmd)
    assert (rc == 0), "Error while executing the command %s. Error message: %s" % (cmd, stderr)
    cmd = "scp utils/expect %s:~" % (os.environ["CALAMARI_NODE"])
    rc, stdout, stderr = launch(cmd=cmd)
    assert (rc == 0), "Error while executing the command %s. Error message: %s" % (cmd, stderr)
    cmd = "ssh %s sudo chmod 755 expect" % (os.environ["CALAMARI_NODE"])
    rc, stdout, stderr = launch(cmd=cmd)
    assert (rc == 0), "Error while executing the command %s. Error message: %s" % (cmd, stderr)
    cmd = "ssh %s sudo ./expect" % (os.environ["CALAMARI_NODE"])
    rc, stdout, stderr = launch(cmd=cmd)
    assert (rc == 0), "Error while executing the command %s. Error message: %s" % (cmd, stderr)
    log.info(stdout)
    # no longer needed, this bug got fixed: #893351
    # cmd = 'ssh %s sudo rcapache2 restart' % (os.environ["CALAMARI_NODE"])
    # rc, stdout, stderr = launch(cmd=cmd)
    # assert (rc == 0), "Error while executing the command %s. Error message: %s" % (cmd, stderr)
    cmd = 'ssh %s sudo wget -O /dev/null http://localhost/' % (os.environ["CALAMARI_NODE"])
    rc, stdout, stderr = launch(cmd=cmd)
    assert (rc == 0), "Error while executing the command %s. Error message: %s" % (cmd, stderr)
def createValidateObject(dictObject):
    name = dictObject.get('objname', None)
    filename = dictObject.get('objname', None) + '.txt'
    pool = dictObject.get('pool', None)
    # fo = open(filename, "w")
    # fo.close()
    cmd = "ssh %s touch %s " % (os.environ["CLIENTNODE"], filename)
    rc, stdout, stderr = launch(cmd=cmd)
    assert (rc == 0), "Error while executing the command %s. Error message: %s" % (cmd, stderr)
    cmd = "ssh %s rados put %s %s --pool=%s" % (os.environ["CLIENTNODE"], name, filename, pool)
    rc, stdout, stderr = launch(cmd=cmd)
    assert (rc == 0), "Error while executing the command %s. Error message: %s" % (cmd, stderr)
    # os.remove(filename)
    cmd = "ssh %s rm %s" % (os.environ["CLIENTNODE"], filename)
    rc, stdout, stderr = launch(cmd=cmd)
    assert (rc == 0), "Error while executing the command %s. Error message: %s" % (cmd, stderr)
    cmd = "ssh %s rados -p %s ls" % (os.environ["CLIENTNODE"], pool)
    rc, stdout, stderr = launch(cmd=cmd)
    assert (rc == 0), "Error while executing the command %s. Error message: %s" % (cmd, stderr)
    objlist = stdout.split('\n')
    assert (name in objlist), "object %s could not be created" % (name)
    log.info("created object %s " % (name))
    cmd = "ssh %s ceph osd map %s %s" % (os.environ["CLIENTNODE"], pool, name)
    rc, stdout, stderr = launch(cmd=cmd)
    assert (rc == 0), "Error while executing the command %s. Error message: %s" % (cmd, stderr)
    log.info("the object details are - %s " % (stdout))
def actionOnCephService(node, action):
    # action - start|stop|restart
    cmd = "ssh %s sudo ls /etc/init.d/ceph-dummy" % (node)  # temporary dummy workaround
    rc, stdout, stderr = launch(cmd=cmd)
    if rc != 0:
        log.info('performing action on ceph service with systemd')
        if action != 'start':
            cmd = "ssh %s sudo systemctl list-units --type service | grep ceph | grep -v failed" % (node)
        else:
            cmd = "ssh %s sudo systemctl list-units --type service --all | grep ceph | grep inactive" % (node)
        rc, stdout, stderr = launch(cmd=cmd)
        assert (rc == 0), "Error while executing the command %s. Error message: %s" % (cmd, stderr)
        all_services = stdout.split("\n")
        list_services = []
        for service in all_services:
            # the unit name is the first space-separated token of each list-units line
            list_services.append(service.split(" ")[0])
        assert (len(list_services) > 1), "no systemd service found for ceph"
        # skip the trailing empty entry left by the final newline
        for i in range(len(list_services) - 1):
            cmd = "ssh %s sudo systemctl %s %s" % (node, action, list_services[i])
            rc, stdout, stderr = launch(cmd=cmd)
            assert (rc == 0), "Error while executing the command %s. Error message: %s" % (cmd, stderr)
    else:
        log.info('performing action on ceph services with sysV')
        cmd = "ssh %s sudo /etc/init.d/ceph %s" % (node, action)
        rc, stdout, stderr = launch(cmd=cmd)
        assert (rc == 0), "Error while executing the command %s. Error message: %s" % (cmd, stderr)
def main():
    os.environ["FOLDER"] = "benchmarks"
    os.environ["SAFT"] = "0.5"
    launch.launch([
        "./configs/20_ormh.csv",
        "./configs/benchmark.csv",
    ], mst_adder)  # mst_adder is defined elsewhere in this script
def __init__(self, ships, num_ships=1, num_waves=1):
    Director.Mission.__init__(self)
    you = VS.getPlayer()
    for i in range(len(ships)):
        launch.launch(
            you.getFlightgroupName(),
            you.getFactionName(),
            ships[i],
            "ai_sitting_duck.py",
            num_ships,
            num_waves,
            Add(you.Position(), ((i + 1) * 500, 0, 0)),
            "",
            False,
        )
        print(" ++ Balancer mission launching %s unit..." % ships[i])
    launch.launch(
        you.getFlightgroupName(),
        you.getFactionName(),
        "Archimedes",
        "ai_sitting_duck.py",
        num_ships,
        num_waves,
        Add(you.Position(), (0, 0, 8000)),
        "",
        False,
    )
    print(" ++ Balancer mission launching the playground")
def main():
    parser = argparse.ArgumentParser(description='Alpha zero')
    parser.add_argument('-c', '--config', default='../configs/config.py', type=str)
    parser.add_argument('-m', '--model', default=None, type=str)
    args = parser.parse_args()

    config, text = load_config(args.config)
    os.makedirs(config['work_dir'], exist_ok=True)
    set_logger(log_file=config['work_dir'] + '/train.log')
    with open(config['work_dir'] + '/config.py', 'w') as f:
        f.write(text)

    self_play_config = config['self_play_config']
    model_config = config['model_config']
    train_config = config['train_config']

    logging.info(f'launch: \n{text}')
    launch(self_play_config=self_play_config,
           train_config=train_config,
           model_config=model_config,
           work_dir=config['work_dir'],
           save_play_history=config['save_play_history'])
def createSwiftTestsUsers(rgw_node, rgw_name):
    config = SafeConfigParser()
    config.read('swifttests/swift-tests.conf')
    swift_list = config.items('func_test')
    swift_data = {}
    swift_data['client_key'] = "client.radosgw.%s" % (rgw_name)
    for i in range(len(swift_list)):
        swift_data[swift_list[i][0]] = swift_list[i][1]
    swift_acc1_cmd = ('sudo radosgw-admin -n {client_key} user create --subuser={account}:{username} '
                      '--display-name=\"{display_name}\" --email={email} --secret={password} '
                      '--key-type swift --access=full').format(**swift_data)
    cmd = "ssh %s %s" % (rgw_node, swift_acc1_cmd)
    rc, stdout, stderr = launch(cmd=cmd)
    if rc != 0:
        raise Exception("Error while creating the swift account1 user. Error message: '%s'" % (stderr))
    swift_acc2_cmd = ('sudo radosgw-admin -n {client_key} user create --subuser={account2}:{username2} '
                      '--display-name=\"{display_name2}\" --email={email2} --secret={password2} '
                      '--key-type swift --access=full').format(**swift_data)
    cmd = "ssh %s %s" % (rgw_node, swift_acc2_cmd)
    rc, stdout, stderr = launch(cmd=cmd)
    if rc != 0:
        raise Exception("Error while creating the swift account2 user. Error message: '%s'" % (stderr))
def restartRadosGW(node):
    cmd = "ssh %s sudo ls /etc/init.d/ceph" % (node)
    rc, stdout, stderr = launch(cmd=cmd)
    if rc != 0:
        log.info('restarting radosgw with systemd')
        cmd = "ssh %s sudo systemctl list-units --type service | grep ceph-radosgw | grep -v failed" % (node)
        rc, stdout, stderr = launch(cmd=cmd)
        assert (rc == 0), "Error while executing the command %s. Error message: %s" % (cmd, stderr)
        all_services = stdout.split("\n")
        list_services = []
        for service in all_services:
            list_services.append(service.split(" ")[0])
        assert (len(list_services) > 1), "no systemd service found for radosgw"
        # skip the trailing empty entry left by the final newline
        for i in range(len(list_services) - 1):
            cmd = "ssh %s sudo systemctl restart %s" % (node, list_services[i])
            rc, stdout, stderr = launch(cmd=cmd)
            assert (rc == 0), "Error while executing the command %s. Error message: %s" % (cmd, stderr)
    else:
        log.info('restarting radosgw with sysV')
        cmd = "ssh %s sudo /etc/init.d/ceph-radosgw restart" % (node)
        rc, stdout, stderr = launch(cmd=cmd)
        assert (rc == 0), "Error while executing the command %s. Error message: %s" % (cmd, stderr)
def __init__(self, ships, num_ships=1, num_waves=1):
    Director.Mission.__init__(self)
    you = VS.getPlayer()
    for i in xrange(len(ships)):
        launch.launch(you.getFlightgroupName(), you.getFactionName(), ships[i],
                      "ai_sitting_duck.py", num_ships, num_waves,
                      Add(you.Position(), ((i + 1) * 500, 0, 0)), '', False)
        print(" ++ Balancer mission launching %s unit..." % ships[i])
    launch.launch(you.getFlightgroupName(), you.getFactionName(), "Archimedes",
                  "ai_sitting_duck.py", num_ships, num_waves,
                  Add(you.Position(), (0, 0, 8000)), '', False)
    print(" ++ Balancer mission launching the playground")
def Start(self, carrier):
    self.inflight = 0
    self.origin = findOriginAndMove(carrier).Position()
    self.carrier = carrier
    self.wingman = launch.launch(VS.getPlayer().getFlightgroupName(), "confed", "nova",
                                 "default", 1, 1, Add((1000, 200, 0), self.origin))
    self.nav = []
    self.visited = [0, 0, 0]
    self.launched = [0, 0, 0]
    self.nav += [launch.launch("nav1", "neutral", "navpoint", "sitting_duck", 1, 1,
                               Add(self.origin, (100000, 0, 50000)))]
    self.nav += [launch.launch("nav2", "neutral", "navpoint", "sitting_duck", 1, 1,
                               Add(self.origin, (0, 0, 100000)))]
    self.roids = [VS.launch("Asteroids", "AFieldSparse", "neutral", "asteroid", "default", 1, 1,
                            Add(self.origin, (0, 0, 100000)), ""),
                  VS.launch("Asteroids", "AFieldThin", "neutral", "asteroid", "default", 1, 1,
                            Add(self.origin, (-10000, 0, 50000)), "")]
    self.nav += [launch.launch("nav3", "neutral", "navpoint", "sitting_duck", 1, 1,
                               Add(self.origin, (-100000, 0, 48000)))]
def restartCeph(node):
    cmd = "ssh %s sudo ls /etc/init.d/ceph" % (node)
    rc, stdout, stderr = launch(cmd=cmd)
    if rc != 0:
        log.warn('this is not a systemV ceph. using systemd restart')
        cmd = "ssh %s sudo rcceph restart" % (node)
        rc, stdout, stderr = launch(cmd=cmd)
        assert (rc == 0), "Error while executing the command %s. Error message: %s" % (cmd, stderr)
        return
    cmd = "ssh %s sudo /etc/init.d/ceph restart" % (node)
    rc, stdout, stderr = launch(cmd=cmd)
    assert (rc == 0), "Error while executing the command %s. Error message: %s" % (cmd, stderr)
def Start(self, carrier):
    self.inflight = 0
    self.jump = findOriginAndMove(carrier, (0, 0, -20000))
    self.carrier = carrier
    self.origin = carrier.Position()
    self.wingman = launch.launch(VS.getPlayer().getFlightgroupName(), "confed", "nova",
                                 "default", 1, 1, Add((1000, 200, 0), self.origin))
    self.transport = launch.launch("Transport", "confed", "cargoship", "default", 1, 1,
                                   Add((-1000, 200, 0), self.origin))
    self.transport.ActivateJumpDrive(0)
    self.transport.SetTarget(self.jump)
    self.launched = [0, 0]
    self.nav = [launch.launch("nav1", "neutral", "eject", "sitting_duck", 1, 1,
                              Add(self.origin, (20000, 0, 10000))),
                self.jump]
    self.roids = [VS.launch("Asteroids", "AFieldSparse", "neutral", "asteroid", "default", 1, 1,
                            Add(self.origin, (0, 0, 10000)), "")]
def addAdminNodes(listNodes):
    if len(listNodes) < 1:
        log.error("install nodes list not provided in the yaml file")
        raise Exception("install nodes list not provided in the yaml file")
    strlistNodes = " ".join(listNodes)
    cmd = 'ssh %s ceph-deploy admin %s' % (os.environ["CLIENTNODE"], strlistNodes)
    rc, stdout, stderr = launch(cmd=cmd)
    if rc != 0:
        raise Exception("Error while executing the command '%s'. Error message: '%s'" % (cmd, stderr))
    for node in listNodes:
        cmd = 'ssh %s sudo chmod +r /etc/ceph/ceph.client.admin.keyring' % (node)
        rc, stdout, stderr = launch(cmd=cmd)
        if rc != 0:
            raise Exception("Error while executing the command '%s'. Error message: '%s'" % (cmd, stderr))
def copyClusterConf(yamlfile):
    cmd = 'scp utils/test.conf %s:/tmp' % (os.environ["CALAMARI_NODE"])
    rc, stdout, stderr = launch(cmd=cmd)
    assert (rc == 0), "Error while executing the command %s. Error message: %s" % (cmd, stderr)
    cmd = 'ssh %s sudo cp /tmp/test.conf /usr/lib/python2.7/site-packages/calamari-server-test/tests' % (os.environ["CALAMARI_NODE"])
    rc, stdout, stderr = launch(cmd=cmd)
    assert (rc == 0), "Error while executing the command %s. Error message: %s" % (cmd, stderr)
    cmd = 'scp %s %s:/tmp/calamari_cluster.yaml' % (yamlfile, os.environ["CALAMARI_NODE"])
    rc, stdout, stderr = launch(cmd=cmd)
    assert (rc == 0), "Error while executing the command %s. Error message: %s" % (cmd, stderr)
def getTotalPGs():
    cmd = "ssh %s ceph pg stat | awk '{print $2;}'" % (os.environ["CLIENTNODE"])
    rc, stdout, stderr = launch(cmd=cmd)
    assert (rc == 0), "Error while executing the command %s. Error message: %s" % (cmd, stderr)
    log.info("the total PGs \n" + stdout.strip())
    return str(stdout).strip()
def validateLibRbdTests():
    cmd = "cat librbd_tests.py | ssh %s python" % (os.environ["CLIENTNODE"])
    rc, stdout, stderr = launch(cmd=cmd)
    for output in stdout.split('\n'):
        log.info(output)
    assert (rc == 0), "Error while executing the command %s. Error message: %s" % (cmd, stderr)
def getOSDtree():
    cmd = 'ssh %s ceph osd tree' % (os.environ["CLIENTNODE"])
    rc, stdout, stderr = launch(cmd=cmd)
    assert (rc == 0), "Error while executing the command %s. Error message: %s" % (cmd, stderr)
    log.info("the osd tree is \n" + stdout.strip())
    return str(stdout).strip()
def getDefaultPools():
    cmd = 'ssh %s ceph osd lspools' % (os.environ["CLIENTNODE"])
    rc, stdout, stderr = launch(cmd=cmd)
    assert (rc == 0), "Error while executing the command %s. Error message: %s" % (cmd, stderr)
    log.info("the default pools are " + stdout.strip())
    return str(stdout).strip()
def getquorum_status():
    cmd = 'ssh %s ceph quorum_status' % (os.environ["CLIENTNODE"])
    rc, stdout, stderr = launch(cmd=cmd)
    assert (rc == 0), "Error while executing the command %s. Error message: %s" % (cmd, stderr)
    log.info("the quorum_status is " + stdout.strip())
    return str(stdout).strip()
def getCephStatus():
    cmd = 'ssh %s ceph --status' % (os.environ["CLIENTNODE"])
    rc, stdout, stderr = launch(cmd=cmd)
    if rc != 0:
        raise Exception("Error while executing the command '%s'. Error message: '%s'" % (cmd, stderr))
    log.info('ceph status is - ' + stdout)
    return stdout.strip()
def getCephHealth():
    cmd = 'ssh %s ceph health' % (os.environ["CLIENTNODE"])
    rc, stdout, stderr = launch(cmd=cmd)
    if rc != 0:
        raise Exception("Error while executing the command '%s'. Error message: '%s'" % (cmd, stderr))
    log.info('Ceph health is - ' + stdout)
    return stdout
def changePoolReplica(dictPool):
    poolname = dictPool.get('poolname', None)
    size = dictPool.get('size', None)
    cmd = "ssh %s ceph osd pool set %s size %s" % (os.environ["CLIENTNODE"], poolname, size)
    rc, stdout, stderr = launch(cmd=cmd)
    assert (rc == 0), "Error while executing the command %s. Error message: %s" % (cmd, stderr)
    log.info("changed the pool - %s to replica size %s" % (poolname, size))
def createInitialMons(listMons):
    if len(listMons) < 1:
        log.error("initial mons list not provided in the yaml file")
        raise Exception("initial mons list not provided in the yaml file")
    cmd = 'ssh %s ceph-deploy --overwrite-conf mon create-initial' % (os.environ["CLIENTNODE"])
    rc, stdout, stderr = launch(cmd=cmd)
    if rc != 0:
        raise Exception("Error while executing the command '%s'. Error message: '%s'" % (cmd, stderr))
def deletePool(dictPool):
    poolname = dictPool.get('poolname', None)
    cmd = "ssh %s ceph osd pool delete %s %s --yes-i-really-really-mean-it" % (os.environ["CLIENTNODE"], poolname, poolname)
    rc, stdout, stderr = launch(cmd=cmd)
    assert (rc == 0), "Error while executing the command %s. Error message: %s" % (cmd, stderr)
    poollist = stdout  # .split(',')
    assert (poolname not in poollist), "pool %s was not deleted in %s" % (poolname, poollist)
def verifyRGWList(rgw_host, rgw_name):
    cmd = "ssh %s ceph-deploy rgw list" % (os.environ["CLIENTNODE"])
    rc, stdout, stderr = launch(cmd=cmd)
    if rc != 0:
        log.error("error while getting rgw list")
        raise Exception("Error while executing the command '%s'. Error message: '%s'" % (cmd, stderr))
    log.info("rgw list output %s" % stdout.strip())
    rgw_list = stdout.strip()
    assert (rgw_host + ':' + rgw_name in rgw_list), "gateway name was not found in rgw list"
def CreateOSDs(listOSDs):
    if len(listOSDs) < 1:
        log.error("OSDs list not provided in the yaml file")
        raise Exception("OSDs list not provided in the yaml file")
    for osd in listOSDs:
        cmd = 'ssh %s ceph-deploy osd create %s' % (os.environ["CLIENTNODE"], osd)
        rc, stdout, stderr = launch(cmd=cmd)
        if rc != 0:
            raise Exception("Error while executing the command '%s'. Error message: '%s'" % (cmd, stderr))
def getFSID():
    cmd = 'ssh %s ceph-conf --lookup fsid' % (os.environ["CLIENTNODE"])
    rc, stdout, stderr = launch(cmd=cmd)
    if rc != 0:
        raise Exception("Error while executing the command '%s'. Error message: '%s'" % (cmd, stderr))
    fsid = stdout.strip()
    log.info('ceph fsid is - ' + fsid)
    return fsid
def create_rgw(rgw_host, rgw_name):
    deleteOldRgwData(rgw_host)
    cmd = "ssh %s ceph-deploy --overwrite-conf rgw create %s:%s" \
          % (os.environ["CLIENTNODE"], rgw_host, rgw_name)
    rc, stdout, stderr = launch(cmd=cmd)
    if rc != 0:
        log.error("error while creating rgw %s on %s " % (rgw_name, rgw_host))
        raise Exception("Error while executing the command '%s'. Error message: '%s'" % (cmd, stderr))
    log.info("created rgw %s on %s " % (rgw_name, rgw_host))
    time.sleep(20)
    cmd = "curl %s" % (rgw_host)
    rc, stdout, stderr = launch(cmd=cmd)
    if rc != 0:
        raise Exception("Error while executing the command '%s'. Error message: '%s'" % (cmd, stderr))
    anonymous_op = '<?xml version="1.0" encoding="UTF-8"?><ListAllMyBucketsResult ' \
                   'xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Owner><ID>anonymous</ID>' \
                   '<DisplayName></DisplayName></Owner><Buckets></Buckets></ListAllMyBucketsResult>'
    assert (stdout.strip() == anonymous_op), "gateway did not give proper response"
def declareInitialMons(listMons):
    if len(listMons) < 1:
        log.error("initial mons list not provided in the yaml file")
        raise Exception("initial mons list not provided in the yaml file")
    monlist = " ".join(listMons)
    cmd = 'ssh %s ceph-deploy new %s' % (os.environ["CLIENTNODE"], monlist)
    rc, stdout, stderr = launch(cmd=cmd)
    if rc != 0:
        raise Exception("Error while executing the command '%s'. Error message: '%s'" % (cmd, stderr))
def removeObject(dictObject):
    name = dictObject.get('objname', None)
    filename = dictObject.get('objname', None) + '.txt'
    pool = dictObject.get('pool', None)
    cmd = "ssh %s rados -p %s ls" % (os.environ["CLIENTNODE"], pool)
    rc, stdout, stderr = launch(cmd=cmd)
    assert (rc == 0), "Error while executing the command %s. Error message: %s" % (cmd, stderr)
    objlist = stdout.split('\n')
    if (name not in objlist):
        log.warning("object %s does not exist" % (name))
        return
    cmd = "ssh %s rados rm %s --pool=%s" % (os.environ["CLIENTNODE"], name, pool)
    rc, stdout, stderr = launch(cmd=cmd)
    assert (rc == 0), "Error while executing the command %s. Error message: %s" % (cmd, stderr)
    # re-list the pool so the removal check runs against the pool contents,
    # not against the (empty) output of 'rados rm'
    cmd = "ssh %s rados -p %s ls" % (os.environ["CLIENTNODE"], pool)
    rc, stdout, stderr = launch(cmd=cmd)
    assert (rc == 0), "Error while executing the command %s. Error message: %s" % (cmd, stderr)
    objlist = stdout.split('\n')
    assert (name not in objlist), "object %s could not be removed" % (name)
    log.info("removed the object - %s " % (name))
def setPGNUM(pg_num):
    total_pgs = 0
    cmd = "ssh %s rados lspools" % (os.environ["CLIENTNODE"])
    rc, stdout, stderr = launch(cmd=cmd)
    assert (rc == 0), "Error while executing the command %s. Error message: %s" % (cmd, stderr)
    # drop the empty entry produced by the trailing newline
    pools = [pool for pool in stdout.split("\n") if pool.strip()]
    for pool in pools:
        cmd = "ssh %s ceph osd pool set %s pg_num %s" % (os.environ["CLIENTNODE"], pool.strip(), pg_num)
        rc, stdout, stderr = launch(cmd=cmd)
        assert (rc == 0), "Error while executing the command %s. Error message: %s" % (cmd, stderr)
        total_pgs = total_pgs + int(pg_num)
    time.sleep(15)
    for pool in pools:
        cmd = "ssh %s ceph osd pool set %s pgp_num %s" % (os.environ["CLIENTNODE"], pool.strip(), pg_num)
        rc, stdout, stderr = launch(cmd=cmd)
        assert (rc == 0), "Error while executing the command %s. Error message: %s" % (cmd, stderr)
    actual_pgs = monitoring.getTotalPGs()  # from utils import monitoring
    assert (int(actual_pgs) == int(total_pgs)), "All PGs were not created"
def LaunchNext(fg, fac, type, ai, pos, logo, newshp=[None], fgappend=''):
    debug.info("Launch nexting " + str(type))
    combofg = fg + fgappend
    if (fgappend == 'Base'):
        combofg = fgappend
    newship = launch.launch(combofg, fac, type, ai, 1, 1, pos, logo)
    dynamic_universe.TrackLaunchedShip(fg, fac, type, newship)
    rad = newship.rSize()
    # VS.playAnimation("warp.ani", pos, (3.0 * rad))
    newshp[0] = newship  # newshp doubles as an output parameter for the caller
    return NextPos(newship, pos)
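# Hedged usage sketch (the ship type and variable names are assumptions, not from the
# original module): because newshp is filled in place, a caller that needs the launched
# unit back typically passes its own one-element list:
#
#   shp = [None]
#   pos = LaunchNext(fg, fac, "vendetta", "default", pos, '', shp)
#   newship = shp[0]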
def Start(self, carrier):
    self.inflight = 0
    self.jump = findOriginAndMove(carrier, (0, 0, -20000))
    self.carrier = carrier
    self.origin = carrier.Position()
    self.wingman = launch.launch(VS.getPlayer().getFlightgroupName(), "confed", "nova",
                                 "default", 1, 1, Add((1000, 200, 0), self.origin))
    self.transport = launch.launch("Transport", "confed", "cargoship", "default", 1, 1,
                                   Add((-1000, 200, 0), self.origin))
    self.transport.ActivateJumpDrive(0)
    self.transport.SetTarget(self.jump)
    self.launched = [0, 0]
    self.nav = [
        launch.launch("nav1", "neutral", "eject", "sitting_duck", 1, 1,
                      Add(self.origin, (20000, 0, 10000))),
        self.jump
    ]
    self.roids = [
        VS.launch("Asteroids", "AFieldSparse", "neutral", "asteroid", "default", 1, 1,
                  Add(self.origin, (0, 0, 10000)), "")
    ]
def __init__(self):
    Director.Mission.__init__(self)
    self.campaign = {("vega_sector/enyo", 0): wc1_mis0.wc1_mis0(),
                     ("vega_sector/vega", 0): wc1_mis1.wc1_mis1()}
    self.sector = save_util.loadStringList(0, "wc1sector")
    if (self.sector == []):
        self.sector = "vega_sector/enyo"
    else:
        [self.sector] = self.sector
    self.curmission = 0
    if (self.sector == ""):
        self.sector = "vega_sector/enyo"
    self.mission = Director.getSaveData(0, "wc1mission", 0)
    self.carrier = launch.launch("BengalClass", "confed", "fleetcarrier", "default", 1, 1, (0, 0, 0))
    self.wfm = ""
    self.StartMission(VS.getSystemFile(), self.sector, self.mission)
def process_usr_query(source):
    # print('####### PROCESSING SOURCE {} #######'.format(source))
    user_data = load_json(source)
    # print(' ####### PARAMS {} #######'.format(user_data.get(PARAMS_SOURCE)))
    # print(' ####### OUTFILE {} #######'.format(user_data.get(FNAME)))
    result = launch.launch(source=user_data.get(PARAMS_SOURCE),
                           task=user_data[TASK],
                           out_format=user_data.get(OUTF, 'CSV'),
                           span=user_data.get(SPAN, 30),
                           out_name=user_data.get(FNAME),
                           demo=user_data[DEMO])
    msg = user_data[LAUNCH_TEXT] + '\n\n' + user_data[PARAMS_TEXT] + '\n\n' + result
    msg_file = '{}.txt'.format(user_data[USRID])
    with open(msg_file, 'w') as handler:
        handler.write(msg)
    write_emailscript(user_data, msg_file)
    # write_emailscript(user_data, msg)
    os.system('chmod +x emailbashscript')
    os.system('./emailbashscript')
    os.remove(msg_file)
    # print('####### REMOVING SOURCE {} #######'.format(source))
    os.remove(source)
    if user_data.get(FNAME):
        zip_path = os.path.join('data', user_data[FNAME] + '.zip')
        f_path = os.path.join('data', user_data[FNAME] + user_data[EXTENSION])
        try:
            # print('####### DELETING OUTPUT {} #######'.format(f_path))
            os.remove(f_path)
            os.remove(zip_path)
        except FileNotFoundError:
            pass
    if user_data.get(PARAMS_SOURCE):
        try:
            # print('####### DELETING PARAMS {} #######'.format(user_data[PARAMS_SOURCE]))
            os.remove(user_data[PARAMS_SOURCE])
        except FileNotFoundError:
            pass
#!/usr/bin/env python
import launch
import os

os.environ.setdefault("FOLDER", "benchmarks")
os.environ.setdefault("SAFT", "0.5")
os.environ.setdefault("MAX_COLUMNS", "400")

launch.launch([
    "./configs/30_ormh.csv_downsampled_32.csv",
    "./configs/30_ormh.csv_downsampled_16.csv",
    "./configs/30_ormh.csv_downsampled_8.csv",
])
# description: program main entry point
import time
from transitions import Machine
from surround import surround
from back import back
from land import land
from launch import launch
# from surround import transitions, states

# model = surround()
# model2 = back()
# create one instance of each of the four behaviours
surrounder = surround()
backer = back()
lander = land()
launcher = launch()

# state definitions
status = ['READY', 'WAITING', 'WORKING', 'DONE']

# state transitions
transitions = [
    {'trigger': 'initialed', 'source': 'READY', 'dest': 'WAITING'},
    {'trigger': 'wating_over', 'source': 'WAITING', 'dest': 'WORKING'},
    {'trigger': 'work_done', 'source': 'WORKING', 'dest': 'DONE'},
    {'trigger': 'ready', 'source': 'DONE', 'dest': 'READY'}
]

# set up a state machine for each model
surrounder_machine = Machine(model=surrounder, states=status, transitions=transitions, initial='READY')
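# Hedged usage sketch (not part of the original script): the transitions library
# attaches the trigger names defined above as methods on the model object, so a
# machine like surrounder_machine can be driven along
# READY -> WAITING -> WORKING -> DONE -> READY roughly like this:
#
#   surrounder.state          # 'READY'
#   surrounder.initialed()    # READY   -> WAITING
#   surrounder.wating_over()  # WAITING -> WORKING (trigger name as spelled above)
#   surrounder.work_done()    # WORKING -> DONE
#   surrounder.ready()        # DONE    -> READY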
# cmd = "bench/score -bname 2 -gnum 2 -grate 3400000 -gtime 30 -adir phish"
# phcmd = "python /home/sjplimp/phish/bait/bait.py" + \
#     " -v p 2 -v f tmp -v k 4 -v a 2 -b mpi in.parallel"
# cmd += ' -acmd "%s"' % phcmd
# cmd += ' -apost "python post_phish.py tmp.*"'
# cmd += " -id %s.bench2.phish.parallel.c++" % date
# launch(shlex.split(cmd))

# MP runs

cmd = "bench/score -bname 1 -gnum 5 -grate 12000000 -gtime 5 -adir mp"
mpcmd = 'metaproc "udpgen_in_buf -n 5 -U 55555 | keyadd_initial_custom"'
cmd += " -acmd '%s'" % mpcmd
cmd += ' -apost "python post_mp.py tmp"'
cmd += " -id %s.bench1.mp" % date
launch(shlex.split(cmd))

cmd = "bench/score -bname 2 -gnum 2 -grate 3400000 -gtime 30 -adir mp"
mpcmd = 'metaproc "udpgen_in_buf -n 2 -U 55555 | keyadd_initial_custom"'
cmd += " -acmd '%s'" % mpcmd
cmd += ' -apost "python post_mp.py tmp"'
cmd += " -id %s.bench2.mp" % date
launch(shlex.split(cmd))

cmd = "bench/score -bname 3 -gnum 2 -grate 2900000 -gtime 30 -adir mp"
mpcmd = 'metaproc "udpgen_in_buf -n 2 -U 55555 | keyadd_initial_custom -2"'
cmd += " -acmd '%s'" % mpcmd
cmd += ' -apost "python post_mp.py tmp"'
cmd += " -id %s.bench3.mp" % date
launch(shlex.split(cmd))
#!/usr/bin/env python
"""
Runs the benchmark files until optimality.
"""
import launch
import os

os.environ["FOLDER"] = "benchmarks_until_optimal"
os.environ["SAFT"] = "0.5"
os.environ.setdefault("EXTRA_ARGS", "")
os.environ["EXTRA_ARGS"] += " --saft_resolution 2000 "
os.environ["MAX_COLUMNS"] = "1000000"

launch.launch([
    "./configs/benchmark.csv",
    "./configs/benchmark_4x4.csv",
])
def show_source(tree, path, column):
    line = self.model[path][ExceptionExplorer.LINE]
    file = self.model[path][ExceptionExplorer.FILE]
    import launch
    launch.launch('http://rox.sourceforge.net/2005/interfaces/Edit',
                  '-l%d' % line, file)
import launch
import copy
import os
import sys
import subprocess
import re


class tangent_config(launch.config):
    def __init__(self, exec_path, input_file="", current_saft=""):
        launch.config.__init__(self, exec_path, input_file, current_saft)

    """ Executes the current run and fill stats. """
    def execute(self):
        output_file = self.output_file
        for step in [0.1, 0.2, 0.3, 0.4, 0.5, 0.75, 1, 2, 5]:
            os.environ["TANGENT_STEP"] = str(step)
            self.output_file = output_file + ".tangent_step_" + str(step)
            return_status = launch.config.execute(self)
        return return_status


os.environ.setdefault("FOLDER", "tangent_steps")
os.environ.pop("SAFT", "")
executions = launch.launch(config_implementation=tangent_config)


""" Copies the configs for every solver method (simplex, dual simplex and barrier). """
def multiple_solvers(c):
    l = []
    for solver in range(3):
        cpy = copy.deepcopy(c)
        cpy.extra_args += " --master_solver " + str(solver) + " --no_warm_start_values --slow_warm_start"
        cpy.output_file += "_" + str(solver)
        l.append(cpy)
    cpy = copy.deepcopy(c)
    cpy.extra_args += " --no_warm_start_values "
    cpy.output_file += ".all_columns_at_once"
    l.append(cpy)
    return l


os.environ.setdefault("FOLDER", "master_solver_benchmarks")
os.environ.pop("SAFT", "")
os.environ.setdefault("MAX_COLUMNS", "0")

executions = launch.launch(extra_config_generator=multiple_solvers)

# msb = msb_plotter.msb_plotter(executions)
# fig = msb.plot()
# plt.savefig("master_solver_benchmark_plot.png", dpi=300)
# plt.close(fig)
#
# tb.table_generator().generate(executions)
def instance_launch(self, image, instancetype):
    launch.launch(config, image, instancetype)
    GLib.timeout_add_seconds(10, self.handler_poll_onetime)
#!/usr/bin/env python
"""
Tests how much the Cuts are actually helping.
"""
import launch
import copy
import os
import sys


def multiple_tricks(c):
    l = []
    for combination in ["", "slave_cuts", "compare_slave_cuts"]:
        cpy = copy.deepcopy(c)
        cpy.extra_args += " --" + str(combination) + " "
        cpy.output_file += "_" + str(combination).replace(" ", "_")
        l.append(cpy)
    return l


os.environ.setdefault("FOLDER", "cuts_benchmarks")
os.environ.pop("SAFT", "")
os.environ.setdefault("MAX_COLUMNS", "100")

executions = launch.launch(extra_config_generator=multiple_tricks)
tol = 5e-5
# t_launch_est, t_cept = find_launch_time(time, tol, x, v,
#                                         mass, m_sat, target, host)
# np.save('saved/saved_params/t_launch1_est.npy', t_launch_est)
# Launch window selector
# np.save('saved/saved_params/t_cept1.npy', t_cept)
t_launch_est = np.load('saved/saved_params/t_launch1_est.npy')
t_cept = np.load('saved/saved_params/t_cept1.npy')

t_launch = t_launch_est[0]
t_intercept = t_cept[0]
launch_indx = np.argmin(np.abs(t_launch - time))
intercept_indx = np.argmin((np.abs(t_intercept - time)))  # Predicted index of intercept

print('Launch window selected at t=', t_launch)
print('Estimated time of intercept: t=', t_intercept)

x = x.transpose()
v = v.transpose()
fin_t, fin_pos, fin_vel, sat_mass, fuel_rem, angle, launch_pos = launch.launch(time, x, v, t_launch, testing=False)
x = x.transpose()
v = v.transpose()
force_per_box, n_boxes, n_particles_sec_box, initial_fuel, launch_dur = launch.get_engine_settings(t_launch_est[0], fin_t)

# Actually launch the thing
from ast2000solarsystem import AST2000SolarSystem
user = '******'
seed = AST2000SolarSystem.get_seed(user)
solar_system = AST2000SolarSystem(seed)
solar_system.engine_settings(force_per_box, n_boxes, n_particles_sec_box,
                             initial_fuel, launch_dur, launch_pos, t_launch)
solar_system.mass_needed_launch(fin_pos)
launched_indx = np.argmin(np.abs(time - fin_t))

import part4 as p4
x = x.transpose()
v = v.transpose()
# Manual orientation
# settings and URL Mapping
settings = {"static_path": os.path.join(os.path.dirname(__file__), "static")}
current_path = os.path.dirname(__file__)

application = tornado.web.Application([
    (r"/home", RawDetailHanlder),
    (r"/get_gas_monitor_data", GetGasMonitorDataHandler),
    (r"/get_gas_analysis_event", GetGasAnalysisEventHandler),
    (r"/get_gas_suggestion", GetGasAnalysisSuggestionHandler),
    (r"/suggest_event", SuggestEventHandler),
    (r"/suggestion_electronic", ElectronicHandler),
    (r"/suggestion_escape", EscapeHandler),
    (r"/find_one", FindOneCaseHandler),
    (r"/case_search", CaseSearchHandler),
    (r"/elec_sure", ElecSureHandler),
    (r"/timeline", TimelineHandler),
    (r"/timeline_data", TimelineDataHandler),
    (r'^/icon/(.*)$', StaticFileHandler, {
        "path": os.path.join(current_path, "static/icon")
    }),
], **settings)

# run...
if __name__ == "__main__":
    launch()
    application.listen(8080)
    tornado.ioloop.IOLoop.current().start()
        # retarget + change input_file
        retarget_bin = os.path.join("build8", "config_retarget") if "PANDORA" in os.environ \
            else os.path.join("build9", "config_retarget")
        retarget_command = retarget_bin + " --to " + str(os.environ["PYRAMID_TO"]) + " " + str(cgdump)
        print(retarget_command)
        status = subprocess.run(retarget_command, universal_newlines=True, shell=True)
        # start as slavewarmstart!
        self.input_file = ""
        self.extra_args = " --slave_warm_start " + str(cgdump) + ".retargeted.cgdump" + str(self.extra_args)
        self.output_file += ".pyramid"
        return_status = launch.config.execute(self)
        return return_status


os.environ.setdefault("FOLDER", "pyramids")
os.environ.pop("SAFT", "")
os.environ["PYRAMID_TO"] = sys.argv[2]

executions = launch.launch(files=[sys.argv[1]], config_implementation=pyramid_config)
#!/usr/bin/env python
"""
Tests how much Multiple Concurrent Slaves help.
"""
import launch
import copy
import os
import sys


def multiple_tricks(c):
    l = []
    for combination in [1, 2, 4, 6, 8, 10]:
        cpy = copy.deepcopy(c)
        # leading space so the flag cannot run into whatever extra_args already holds
        cpy.extra_args += " --master_threshold 20000 --slaves " + str(combination) + " "
        cpy.output_file += "_slaves_" + str(combination)
        l.append(cpy)
    return l


os.environ.setdefault("FOLDER", "callback_benchmarks")
os.environ.pop("SAFT", "")
os.environ.setdefault("MAX_COLUMNS", "400")

executions = launch.launch(files=["configs/50_ormh.csv_downsampled_8.csv"],
                           extra_config_generator=multiple_tricks)
from launch import launch
from orbit import orbit
from land import land
import krpc

conn = krpc.connect(name='Drone Controller')
vessel = conn.space_center.active_vessel
srf_frame = vessel.orbit.body.reference_frame
launchpad = (vessel.flight(srf_frame).longitude, vessel.flight(srf_frame).latitude)
print(launchpad)

print("Start")
launch("Launcher2", 50000, 75000, 270)
land("Launcher2", launchpad)
# orbit("Launcher2")
#!/usr/bin/env python
import launch
import copy
import os
import sys


""" Loads the config as slave_warm_start. """
def warm_slaves(c):
    c.extra_args += " --slave_warm_start " + c.input_file
    c.input_file = ""
    return [c]


os.environ["FOLDER"] = "slave_warm_starts"
os.environ.pop("SAFT", "")
os.environ["MAX_COLUMNS"] = "10"

executions = launch.launch(extra_config_generator=warm_slaves)
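# Hedged summary, inferred only from the benchmark scripts above (launch.py itself is
# not shown here): launch.launch() appears to accept three optional hooks --
#   files                  -- an explicit list of config CSVs to run
#   config_implementation  -- a launch.config subclass to instantiate per run
#   extra_config_generator -- a callable given each base config that returns the list
#                             of config copies that should actually be executed
# while the FOLDER, SAFT, MAX_COLUMNS and EXTRA_ARGS environment variables appear to
# steer where results are written and how the runs are constrained.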
def main():
    """ Main function;
    * Launch spot request of NUMINSTANCE
    * Run Benchmark
    * Download Log
    * Plot data from log
    """
    session = boto3.Session(profile_name=CREDENTIAL_PROFILE)
    client = session.client('ec2', region_name=REGION)
    if NUM_INSTANCE > 0:
        spot_request_ids = launch.launch(client, NUM_INSTANCE, CONFIG_DICT)
        print("CHECK SECURITY GROUP ALLOWED IP SETTINGS!!!")
        # Wait for our spots to fulfill
        launch.wait_for_fulfillment(client, spot_request_ids, copy.deepcopy(spot_request_ids))
        spot_instance_response = client.describe_spot_instance_requests(
            SpotInstanceRequestIds=spot_request_ids)
        instance_ids = [
            result["InstanceId"]
            for result in spot_instance_response["SpotInstanceRequests"]
        ]
        client.create_tags(Resources=instance_ids, Tags=TAG)
        # Wait Running
        launch.wait_for_running(client, instance_ids, copy.deepcopy(instance_ids))
        time.sleep(15)
        launch.wait_ping(client, instance_ids, copy.deepcopy(instance_ids))
    if REBOOT:
        print("Rebooting instances...")
        session = boto3.Session(profile_name=CREDENTIAL_PROFILE)
        ec2 = session.resource('ec2', region_name=REGION)
        instances = ec2.instances.filter(Filters=[{
            'Name': 'instance-state-name',
            'Values': ['running']
        }, {
            'Name': 'tag:ClusterId',
            'Values': [CLUSTER_ID]
        }])
        instance_ids = [x.id for x in instances]
        client.reboot_instances(InstanceIds=instance_ids)
        launch.wait_ping(client, instance_ids, copy.deepcopy(instance_ids))
    if RUN:
        for i in range(NUM_RUN):
            run.run_benchmark()
    if TERMINATE:
        # use the EC2 resource API here: an EC2 client has no .instances collection
        ec2 = session.resource('ec2', region_name=REGION)
        instances = ec2.instances.filter(Filters=[{
            'Name': 'instance-state-name',
            'Values': ['running']
        }, {
            'Name': 'tag:ClusterId',
            'Values': [CLUSTER_ID]
        }])
        instance_ids = [x.id for x in instances]
        # TODO get spot_request_ids
        launch.terminate(client, spot_request_ids, instance_ids)
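# Hedged sketch (names and values are assumptions, not from the original script):
# create_tags() above expects the standard boto3 tag structure, and the later
# 'tag:ClusterId' filters suggest TAG is defined along these lines:
#
#   TAG = [{'Key': 'ClusterId', 'Value': CLUSTER_ID}]
#
# CONFIG_DICT is whatever the custom launch.launch() helper expects for the spot
# request parameters; its shape is not shown here.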