def decoding(self):
    # Send the scenario object to the simulation script
    with open('scenario.obj', 'wb') as s_f:
        pickle.dump(self.scenario, s_f)
    for x in range(0, 100):
        if os.path.isfile('result.obj'):
            os.remove("result.obj")
        os.system("python3 simulation.py scenario.obj result.obj")
        resultObj = None
        # Read the fitness score back
        if os.path.isfile('result.obj'):
            with open('result.obj', 'rb') as f_f:
                resultObj = pickle.load(f_f)
        if resultObj is not None and resultObj['fitness'] != '':
            return resultObj
        else:
            util.print_debug(" ***** " + str(x) + "th/100 trial: Fail to get fitness, try again ***** ")
    return None
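# A minimal, self-contained sketch of the same pickle-file handshake used by
# decoding() above; the file names and the simulation command are assumptions
# taken from the snippet, not a definitive interface.
import os
import pickle

def run_once(scenario, sim_cmd="python3 simulation.py scenario.obj result.obj"):
    with open('scenario.obj', 'wb') as f:   # hand the scenario to the child process
        pickle.dump(scenario, f)
    if os.path.isfile('result.obj'):        # clear any stale result first
        os.remove('result.obj')
    os.system(sim_cmd)                      # the child is expected to write result.obj
    if not os.path.isfile('result.obj'):
        return None                         # the simulation died before writing a result
    with open('result.obj', 'rb') as f:
        return pickle.load(f)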
def mutation(self, gen):
    i = 0
    while i < len(self.pop):
        eachChs = self.pop[i]
        i += 1
        if self.pm >= random.random():
            beforeMutation = copy.deepcopy(eachChs)
            # Select a mutation point: one NPC at one time slice
            npc_index = random.randint(0, eachChs.code_x1_length - 1)
            time_index = random.randint(0, eachChs.code_x2_length - 1)
            # Record which chromosomes have been touched
            self.touched_chs.append(eachChs)
            actionIndex = random.randint(0, 1)
            if actionIndex == 0:
                # Change speed
                eachChs.scenario[npc_index][time_index][0] = random.uniform(self.bounds[0][0], self.bounds[0][1])
            elif actionIndex == 1:
                # Change direction
                eachChs.scenario[npc_index][time_index][1] = random.randrange(self.bounds[1][0], self.bounds[1][1])
        # Only run the simulation for chromosomes touched in this generation
        if eachChs in self.touched_chs:
            eachChs.func(gen, self.isInLis)
        else:
            util.print_debug(" --- The chromosome has not been touched in this generation, skip simulation. ---")
        util.print_debug(" --- In mutation: Current scenario has y = " + str(eachChs.y))
def do_merge(self, others):
    pto_temp_file = ManagedTempFile.get(None, ".pto")
    command = "pto_merge"
    args = list()
    args.append("--output=%s" % pto_temp_file)
    # Possibly this is still empty
    if self.file_name and os.path.exists(self.file_name):
        args.append(self.file_name)
    for other in others:
        args.append(other.get_a_file_name())
    print_debug(args)
    (rc, output) = Execute.with_output(command, args)  # go go go
    if not rc == 0:
        print
        print
        print
        print 'Output:'
        print output
        print 'rc: %d' % rc
        raise Exception('failed pto_merge')
    return PTOProject.from_temp_file(pto_temp_file)
def process(folder, debug):
    # Walk all the folders and files under "folder"
    codetrscp_content = ''
    trainscp_content = ''
    for bdir, _, files in os.walk(folder):
        for fname in files:
            if fname[-4:] != '.wav':
                continue
            wav_fname = bdir + '/' + fname
            mfcc_fname = bdir + '/' + fname[:-4] + MFC_EXTENSION
            codetrscp_content += wav_fname + ' ' + mfcc_fname + '\n'
            trainscp_content += mfcc_fname + '\n'
            util.print_debug(debug, "Dealt with file '" + wav_fname + "'")
    # List all .wav files for the MFCC extraction process
    with open('files/codetr.scp', 'w') as f:
        f.write(codetrscp_content)
    # List all .mfc files for the modelling process
    with open('files/train.scp', 'w') as f:
        f.write(trainscp_content)
    call(['HCopy', '-C', 'config/conf-extraction', '-S', 'files/codetr.scp'])
def __repr__(self):
    '''The primary line, ie not including any comments'''
    self.update()
    print_debug()
    print_debug('original: %s' % self.text)
    print_debug('variables: %s' % self.variables)
    ret = self.prefix
    printed = set()
    if self.variable_print_order:
        for k in self.variable_print_order:
            if k in self.variables:
                v = self.variables[k]
                print_debug('k: %s, v: %s' % (repr(k), repr(v)))
                printed.add(k)
                ret += ' %s' % self.print_variable(k)
    else:
        print 'WARNING: no variable print order'
    for k in self.variables:
        if k in printed:
            continue
        ret += ' %s' % self.print_variable(k)
    print_debug('final: %s' % ret)
    return ret
def read_old_metadata():
    """Handle options.conf providing package, url and archives."""
    if not os.path.exists(os.path.join(os.getcwd(), "options.conf")):
        return None, None, None, None, None, None, [], []
    config_f = configparser.ConfigParser(interpolation=None)
    config_f.read("options.conf")
    if "package" not in config_f.sections():
        return None, None, None, None, None, None, [], []
    archives = config_f["package"].get("archives")
    archives = archives.split() if archives else []
    archives_from_git = config_f["package"].get("archives_from_git")
    archives_from_git = archives_from_git.split() if archives_from_git else []
    if util.debugging:
        print_debug(f"\nARCHIVES {archives}")
        print_debug(f"ARCHIVES_GIT 1: {archives_from_git}")
    # Return 8 values; the early returns above must match this arity
    return (
        config_f["package"].get("name"),
        config_f["package"].get("url"),
        config_f["package"].get("download_from_git"),
        config_f["package"].get("branch"),
        config_f["package"].get("force_module"),
        config_f["package"].get("force_fullclone"),
        archives,
        archives_from_git,
    )
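# For reference, a hypothetical options.conf with the fields read above, parsed
# the same way (interpolation disabled, whitespace-separated archive list):
import configparser

sample = """
[package]
name = example
url = https://example.com/example-1.0.tar.gz
archives = https://example.com/dep.tar.gz third_party/dep
"""
cfg = configparser.ConfigParser(interpolation=None)
cfg.read_string(sample)
print(cfg["package"].get("name"))              # example
print(cfg["package"].get("archives").split())  # ['https://example.com/dep.tar.gz', 'third_party/dep']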
def simple(cmd, working_dir=None):
    '''Returns rc of process, no output'''
    print_debug('cmd in: %s' % cmd)
    if True:
        #print 'Executing'
        os.sys.stdout.flush()
        ret = os.system(cmd)
        os.sys.stdout.flush()
        #print 'Execute done'
        return ret
    else:
        cmd = "/bin/bash " + cmd
        output = ''
        to_exec = cmd.split(' ')
        print 'going to execute: %s' % to_exec
        subp = subprocess.Popen(to_exec)
        while subp.returncode is None:
            # Hmm, how to treat stdout vs stderr?
            com = subp.communicate()[0]
            if com:
                print com
            com = subp.communicate()[1]
            if com:
                print com
            time.sleep(0.05)
            subp.poll()
        return subp.returncode
def __str__(self, key_blacklist=None):
    '''The primary line, ie not including any comments'''
    if key_blacklist is None:
        key_blacklist = []
    self.update()
    print_debug()
    print_debug('original: %s' % self.text)
    print_debug('variables: %s' % self.variables)
    ret = self.prefix()
    printed = set()
    for k in self.variable_print_order():
        if k in key_blacklist:
            continue
        if k in self.variables:
            v = self.variables[k]
            print_debug('k: %s, v: %s' % (repr(k), repr(v)))
            printed.add(k)
            ret += ' %s' % self.print_variable(k)
    for k in self.variables:
        if k in key_blacklist:
            continue
        if k in printed:
            continue
        ret += ' %s' % self.print_variable(k)
    print_debug('final: %s' % ret)
    return ret
def get_hdd_device_file_names(self):
    ret = set()
    for entry in os.listdir("/dev"):
        raw = re.match("[hs]d[a-z]", entry)
        if raw:
            ret.add(os.path.join("/dev", raw.group()))
    print_debug(ret)
    return ret
def __del__(self):
    try:
        if os.path.exists(self.file_name):
            shutil.rmtree(self.file_name)
            print_debug('Deleted temp dir %s' % self.file_name)
        else:
            # Ignore if it was never created
            print_debug("Didn't delete nonexistent temp dir %s" % self.file_name)
    except:
        print 'WARNING: failed to delete temp dir: %s' % self.file_name
def findFitness(self, deltaDlist, dList, isEgoFault, isHit, hitTime):
    # The higher the fitness, the better
    minDeltaD = self.maxint
    for npc in deltaDlist:  # ith NPC
        hitCounter = 0
        for deltaD in npc:
            if isHit and hitCounter == hitTime:
                break
            if deltaD < minDeltaD:
                minDeltaD = deltaD  # Min deltaD over time slices, across all NPCs
            hitCounter += 1
    util.print_debug(deltaDlist)
    util.print_debug(" *** minDeltaD is " + str(minDeltaD) + " *** ")
    minD = self.maxint
    for npc in dList:  # ith NPC
        hitCounter = 0
        for d in npc:
            if isHit and hitCounter == hitTime:
                break
            if d < minD:
                minD = d
            hitCounter += 1
    util.print_debug(dList)
    util.print_debug(" *** minD is " + str(minD) + " *** ")
    fitness = 0.5 * minD + 0.5 * minDeltaD
    # The GA maximizes y, so negate: smaller distances score higher
    return fitness * -1
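# Worked example: with no hit (isHit False), findFitness() reduces to the
# negated mean of the two minima. Toy numbers, two NPCs, three time slices:
deltaDlist = [[12.0, 3.5, 9.0], [8.0, 7.5, 6.0]]   # min over all entries: 3.5
dList = [[20.0, 5.0, 14.0], [9.0, 8.5, 7.0]]       # min over all entries: 5.0
print(-(0.5 * 5.0 + 0.5 * 3.5))                    # -4.25: closer encounters score higher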
def select_roulette(self):
    sum_f = 0
    util.print_debug(" +++ Before select() +++ ")
    for i in range(0, self.pop_size):
        if self.pop[i].y == 0:
            self.pop[i].y = 0.001
        util.print_debug(" === Fitness result of the scenario is " + str(self.pop[i].y) + " === ")
    ############################################################
    # Shift all fitness values to be non-negative
    min_y = self.pop[0].y
    for k in range(0, self.pop_size):
        if self.pop[k].y < min_y:
            min_y = self.pop[k].y
    if min_y < 0:
        for l in range(0, self.pop_size):
            self.pop[l].y = self.pop[l].y + (-1) * min_y
    # Roulette: selection probability proportional to fitness
    for i in range(0, self.pop_size):
        sum_f += self.pop[i].y
    p = [0] * self.pop_size
    for i in range(0, self.pop_size):
        if sum_f == 0:
            sum_f = 1
        p[i] = self.pop[i].y / sum_f
    q = [0] * self.pop_size
    q[0] = 0
    for i in range(0, self.pop_size):
        s = 0
        for j in range(0, i + 1):
            s += p[j]
        q[i] = s  # Cumulative probability
    # Start roulette: spin once per slot in the next population
    v = []
    for i in range(0, self.pop_size):
        r = random.random()
        if r < q[0]:
            selectedChromosome = Chromosome(self.bounds, self.NPC_size, self.time_size)
            selectedChromosome.scenario = self.pop[0].scenario
            selectedChromosome.y = self.pop[0].y
            v.append(selectedChromosome)
        for j in range(1, self.pop_size):
            if q[j - 1] < r <= q[j]:
                selectedChromosome = Chromosome(self.bounds, self.NPC_size, self.time_size)
                selectedChromosome.scenario = self.pop[j].scenario
                selectedChromosome.y = self.pop[j].y
                v.append(selectedChromosome)
    self.pop = copy.deepcopy(v)
    ############################################################
    util.print_debug(" +++ After select() +++ ")
    for i in range(0, self.pop_size):
        util.print_debug(" === Fitness result of the scenario is " + str(self.pop[i].y) + " === ")
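# The same wheel, distilled into a standalone helper (fitness values assumed
# already shifted to be non-negative, as select_roulette() does above):
import random

def roulette_pick(fitnesses):
    total = sum(fitnesses) or 1.0          # guard the all-zero case
    r, acc = random.random(), 0.0
    for i, f in enumerate(fitnesses):
        acc += f / total
        if r <= acc:
            return i
    return len(fitnesses) - 1              # numeric fallback

print(roulette_pick([0.1, 0.7, 0.2]))      # index 1 wins ~70% of the time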
def optimize_xy_only(self):
    # XXX: move this to earlier if possible
    '''
    Added by pto_merge or something
    v Ra0 Rb0 Rc0 Rd0 Re0 Vb0 Vc0 Vd0
    v Eb1 Eev1 Er1
    v Eb2 Eev2 Er2
    v Eb3 Eev3 Er3
    v

    Need something like (assume image 0 is anchor)
    v d1 e1
    v d2 e2
    v d3 e3
    v

    After saving, get huge i lines
    #-hugin cropFactor=1
    i w2816 h2112 f-2 Eb1 Eev0 Er1 Ra0 Rb0 Rc0 Rd0 Re0 Va1 Vb0 Vc0 Vd0 Vx-0 Vy-0 a0 b0 c0 d-0 e-0 g-0 p0 r0 t-0 v51 y0 Vm5 u10 n"x00000_y00033.jpg"
    '''
    print_debug('Fixing up v (optimization variable) lines...')
    new_project_text = ''
    new_lines = ''
    for i in range(1, len(self.get_image_file_names())):
        # Optimize d (x) and e (y) for all images other than the anchor
        new_lines += 'v d%d e%d \n' % (i, i)
    new_lines += 'v \n'
    for line in self.text.split('\n'):
        if line == '':
            new_project_text += '\n'
        elif line[0] == 'v':
            # Replace once, ignore others
            new_project_text += new_lines
            new_lines = ''
        else:
            new_project_text += line + '\n'
    self.text = new_project_text
    print
    print
    print_debug(self.text)
    print
    print
def reparse(self):
    self.variables = dict()
    first = True
    #for token in self.text.split(' '):
    for (k, v) in self.get_tokens():
        #print 'token: "%s"' % token
        #k = token[0]
        #v = token[1:]
        print_debug('k: %s, v: %s' % (repr(k), repr(v)))
        # We can still have an empty string
        if not v is None and len(v) == 0:
            v = None
        if first:
            prefix = k
            if not v is None and len(v) > 0:
                print 'Line: %s' % self.text
                print 'ERROR: line type should not have value: %s' % repr(v)
                raise Exception('confused')
            first = False
            continue
        # Convert if possible
        try:
            if k in self.key_variables():
                pass
            elif k in self.int_variables():
                v = int(v)
            elif k in self.float_variables():
                v = float(v)
            elif k in self.string_variables():
                # Already in string form
                pass
            else:
                print 'WARNING: unknown data type on %s (full: %s)' % (k, self.text)
                raise Exception('Unknown key')
        except:
            print 'line: %s' % self.text
            print 'key: %s, value: %s' % (repr(k), repr(v))
            self.print_variables()
            raise
        # Ready to roll
        self.set_variable(k, v)
def func(self, gen=None, lisFlag=False):
    resultObj = self.decoding()
    self.y = float(resultObj['fitness'])
    if resultObj['fault'] == 'ego':
        # Found a bug in the ego ADS
        util.print_debug(" ***** Found an accident where ego is at fault ***** ")
        # Dump the scenario that causes the accident
        if not os.path.exists('AccidentScenario'):
            os.mkdir('AccidentScenario')
        now = datetime.now()
        date_time = now.strftime("%m-%d-%Y-%H-%M-%S")
        ckName = 'AccidentScenario/accident-gen' + str(gen) + '-' + date_time
        if lisFlag:
            ckName = ckName + "-LIS"
        with open(ckName, 'wb') as a_f:
            pickle.dump(self, a_f)
def handle(self):
    server = self.options.get('server', DIAG_VIEWER_HSOT)
    client = DiagnosticsViewerClient(server=server, cert=False)
    try:
        id, key = client.login(self.options['username'], self.options['password'])
    except Exception as e:
        print_debug(e)
        return -1
    path = self.options.get('path')
    print_message('Uploading directory {}'.format(path))
    try:
        dataset_id = client.upload_package(path)
    except Exception as e:
        print_debug(e)
        return -1
    return json.dumps({'dataset_id': dataset_id})
def reparse(self):
    '''Force a parse'''
    if False:
        print 'WARNING: pto parsing disabled'
        return
    self.panorama_line = None
    self.mode_line = None
    self.comment_lines = list()
    self.variable_lines = list()
    self.control_point_lines = list()
    self.absolute_control_point_lines = list()
    self.image_lines = list()
    self.misc_lines = list()
    self.optimizer_lines = list()
    #print self.text
    print_debug('Beginning split on text of len %d' % (len(self.get_text())))
    for line in self.get_text().split('\n'):
        print_debug('Processing line: %s' % line)
        # A single * is end of file
        # Any comments / garbage is allowed to follow
        #if line.strip() == '*':
        #    break
        # In practice this is PTOptimizer output I want
        # Add an option later if needed to override
        self.parse_line(line)
    print_debug()
    print 'Finished reparse'
    self.parsed = True
def on_collision(agent1, agent2, contact):
    #util.print_debug(" --- On Collision, ego speed: " + str(agent1.state.speed) + ", NPC speed: " + str(agent2.state.speed))
    if self.isHit:
        return
    self.isHit = True
    if agent2 is None or agent1 is None:
        self.isEgoFault = True
        util.print_debug(" --- Hit road obstacle --- ")
        return
    apollo = agent1
    npcVehicle = agent2
    if agent2.name == "XE_Rigged-apollo_3_5":
        apollo = agent2
        npcVehicle = agent1
    util.print_debug(" --- On Collision, ego speed: " + str(apollo.state.speed) + ", NPC speed: " + str(npcVehicle.state.speed))
    if apollo.state.speed <= 0.005:
        # The ego was effectively stationary, so it cannot be at fault
        self.isEgoFault = False
        return
    self.isEgoFault = liability.isEgoFault(apollo, npcVehicle, sim, init_degree)
    # Compute deltaD when the ego is at fault
    if self.isEgoFault:
        self.egoFaultDeltaD = self.findCollisionDeltaD(apollo, npcVehicle)
        util.print_debug(" >>>>>>> Ego fault delta D is " + str(self.egoFaultDeltaD))
def isHitEdge(ego, sim, init_degree):
    # init_degree = ego.state.rotation.y
    lane_center = sim.map_point_on_lane(ego.state.transform.position)
    ego_x = ego.state.transform.position.x
    ego_y = ego.state.transform.position.y
    ego_z = ego.state.transform.position.z
    ego_point = Point3D(ego_x, ego_y, ego_z)
    mp_x = lane_center.position.x
    mp_y = lane_center.position.y
    mp_z = lane_center.position.z
    mp_point = Point3D(mp_x, mp_y, mp_z)
    # Hard-coded endpoints of the two outer lane boundaries on this map;
    # l1 and l6 are used below, so their definitions must stay live
    x1, y1, z1 = 160.809997558594, 10.1931667327881, 8.11004638671875
    x_e_1, y_e_1, z_e_1 = 101.646751403809, 10.1278858184814, 8.18318462371826
    x6, y6, z6 = 24.9999961853027, 10.1931667327881, -3.77267646789551
    x_e_6, y_e_6, z_e_6 = 84.163330078125, 10.1277523040771, -3.77213048934937
    l1 = Line3D(Point3D(x1, y1, z1), Point3D(x_e_1, y_e_1, z_e_1))
    l6 = Line3D(Point3D(x6, y6, z6), Point3D(x_e_6, y_e_6, z_e_6))
    diagonal_length = pow(ego.bounding_box.size.z, 2) + pow(ego.bounding_box.size.x, 2)
    diagonal_length = math.sqrt(diagonal_length)
    rotate_degree = abs(ego.state.rotation.y - init_degree) + 23.86
    ego_size_z = (diagonal_length / 2.0) * math.sin(math.radians(rotate_degree))
    if l6.distance(mp_point) <= 1:
        lane_bound = mp_z - 2.2
        if ego.state.transform.position.z - ego_size_z <= lane_bound:
            util.print_debug("--- Cross the boundary --- ")
            return True
    if l1.distance(mp_point) <= 1:
        lane_bound = mp_z + 2.2
        if ego.state.transform.position.z + ego_size_z >= lane_bound:
            util.print_debug("--- Cross the boundary --- ")
            return True
    return False
def isHitYellowLine(ego, sim, init_degree):
    lane_center = sim.map_point_on_lane(ego.state.transform.position)
    ego_x = ego.state.transform.position.x
    ego_y = ego.state.transform.position.y
    ego_z = ego.state.transform.position.z
    ego_point = Point3D(ego_x, ego_y, ego_z)
    mp_x = lane_center.position.x
    mp_y = lane_center.position.y
    mp_z = lane_center.position.z
    mp_point = Point3D(mp_x, mp_y, mp_z)
    # Hard-coded endpoints of the yellow center lines on this map;
    # l1 and l6 are used below, so their definitions must stay live
    x1, y1, z1 = 145.000030517578, 10.1931667327881, 4.20298147201538
    x_e_1, y_e_1, z_e_1 = 132.136016845703, 10.1280860900879, 4.20766830444336
    x6, y6, z6 = 24.9999923706055, 10.1931667327881, 0.026848778128624
    x_e_6, y_e_6, z_e_6 = 82.6629028320313, 10.1278924942017, 0.0420729108154774
    l1 = Line3D(Point3D(x1, y1, z1), Point3D(x_e_1, y_e_1, z_e_1))
    l6 = Line3D(Point3D(x6, y6, z6), Point3D(x_e_6, y_e_6, z_e_6))
    diagonal_length = pow(ego.bounding_box.size.z, 2) + pow(ego.bounding_box.size.x, 2)
    diagonal_length = math.sqrt(diagonal_length)
    rotate_degree = abs(ego.state.rotation.y - init_degree) + 23.86
    ego_size_z = (diagonal_length / 2.0) * math.sin(math.radians(rotate_degree))
    if l1.distance(mp_point) <= 1:
        lane_bound = mp_z - 2.2
        if ego.state.transform.position.z - ego_size_z <= lane_bound:
            util.print_debug(" --- Cross the yellow line")
            return True
    if l6.distance(mp_point) <= 1:
        lane_bound = mp_z + 2.2
        if ego.state.transform.position.z + ego_size_z >= lane_bound:
            util.print_debug(" --- Cross the yellow line")
            return True
    return False
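# Both boundary checks hinge on a point-to-line distance. Assuming Point3D and
# Line3D come from sympy.geometry (which the call pattern suggests), a tiny demo:
from sympy import Line3D, Point3D

lane_edge = Line3D(Point3D(0, 0, 0), Point3D(100, 0, 0))  # hypothetical boundary
print(lane_edge.distance(Point3D(50, 0, 1.8)))            # 9/5, i.e. 1.8 off the line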
def findDeltaD(self, ego, npc):
    d = liability.findDistance(ego, npc) - 4.6  # 4.6 is the length of a car
    deltaD = self.maxint  # The smaller the delta D, the better
    deltaDFront = self.maxint
    deltaDSide = self.maxint
    # When the NPC is in front of the ego
    if npc.state.transform.position.x + 4.6 < ego.state.transform.position.x and npc.state.transform.position.x + 20 > ego.state.transform.position.x:
        if npc.state.transform.position.z > ego.state.transform.position.z - 2 and npc.state.transform.position.z < ego.state.transform.position.z + 2:
            deltaDFront = d - self.brakeDist(ego.state.speed)
            util.print_debug(" --- Delta D Front: " + str(deltaDFront))
    # When the ego is changing lane into the NPC's front
    if npc.state.transform.position.x - 4.6 > ego.state.transform.position.x and npc.state.transform.position.x - 20 < ego.state.transform.position.x:
        if npc.state.transform.position.z + 2 > ego.state.transform.position.z and npc.state.transform.position.z - 2 < ego.state.transform.position.z and (ego.state.rotation.y < 269 or ego.state.rotation.y > 271):
            deltaDSide = d - self.brakeDist(npc.state.speed)
            util.print_debug(" --- Delta D Side: " + str(deltaDSide))
    deltaD = min(deltaDSide, deltaDFront)
    return deltaD
def process_archives(self, main_src):
    """Process extra sources needed by the package.

    These sources include: archives, go archives and multiversion.
    """
    go_archives = []
    multiver_archives = []
    src_objects = []
    if os.path.basename(main_src.url) == "list":
        # Add extra archives and multiversion for Go packages
        self.process_go_archives(go_archives)
    else:
        # Add multiversion for the rest of the patterns
        self.process_multiver_archives(main_src, multiver_archives)
    full_archives = self.archives + go_archives + multiver_archives
    # Download and extract the full list; entries alternate URL, destination
    for arch_url, destination in zip(full_archives[::2], full_archives[1::2]):
        if util.debugging:
            print_debug("arch_url 3: {} - {}".format(arch_url, destination))
        src_path = self.check_or_get_file(arch_url, os.path.basename(arch_url), mode="a")
        # Create source object and extract archive
        archive = Source(arch_url, destination, src_path, self.config.default_pattern)
        # Add archive prefix to list
        self.config.archive_details[arch_url + "prefix"] = archive.prefix
        self.prefixes[arch_url] = archive.prefix
        # Add archive to list
        src_objects.append(archive)
    return src_objects
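# The flat archives list alternates URL and extraction destination, consumed
# pairwise via the two strided slices. Hypothetical values:
archives = ["https://example.com/dep.tar.gz", "third_party/dep",
            "https://example.com/extra.tar.gz", "vendor/extra"]
for arch_url, destination in zip(archives[::2], archives[1::2]):
    print(arch_url, "->", destination)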
def select_top2(self):
    util.print_debug(" +++ Before select() +++ ")
    for i in range(0, self.pop_size):
        util.print_debug(" === Fitness result of the scenario is " + str(self.pop[i].y) + " === ")
    # Fill half of the next population with copies of the best chromosome
    maxFitness = 0
    v = []
    for i in range(0, self.pop_size):
        if self.pop[i].y > maxFitness:
            maxFitness = self.pop[i].y
    for i in range(0, self.pop_size):
        if self.pop[i].y == maxFitness:
            for j in range(int(self.pop_size / 2.0)):
                selectedChromosome = Chromosome(self.bounds, self.NPC_size, self.time_size)
                selectedChromosome.scenario = self.pop[i].scenario
                selectedChromosome.y = self.pop[i].y
                v.append(selectedChromosome)
            break
    # Fill the other half with copies of the runner-up
    max2Fitness = 0
    for i in range(0, self.pop_size):
        if self.pop[i].y > max2Fitness and self.pop[i].y != maxFitness:
            max2Fitness = self.pop[i].y
    for i in range(0, self.pop_size):
        if self.pop[i].y == max2Fitness:
            for j in range(int(self.pop_size / 2.0)):
                selectedChromosome = Chromosome(self.bounds, self.NPC_size, self.time_size)
                selectedChromosome.scenario = self.pop[i].scenario
                selectedChromosome.y = self.pop[i].y
                v.append(selectedChromosome)
            break
    self.pop = copy.deepcopy(v)
    util.print_debug(" +++ After select() +++ ")
    for i in range(0, self.pop_size):
        util.print_debug(" === Fitness result of the scenario is " + str(self.pop[i].y) + " === ")
def reparse(self):
    '''Force a parse'''
    if False:
        print 'WARNING: pto parsing disabled'
        return
    self.panorama_line = None
    self.mode_line = None
    self.comment_lines = list()
    self.variable_lines = list()
    self.control_point_lines = list()
    self.image_lines = list()
    self.misc_lines = list()
    #print self.text
    print_debug('Beginning split on text of len %d' % (len(self.get_text())))
    for line in self.get_text().split('\n'):
        print_debug('Processing line: %s' % line)
        self.parse_line(line)
    print_debug()
def poll():
    params = {'request': 'next'}
    url = 'http://' + FRONTEND_POLLER_HOST
    options = {}
    try:
        job = requests.get(url, params).content
        job = json.loads(job)
        print_message(job, 'ok')
    except ConnectionError as ce:
        print_message("Error requesting job from frontend poller")
        print_debug(ce)
        return -3, None
    if not job:
        return -2, None
    try:
        options['user'] = job.get('user')
        options['run_name'] = job.get('run_name')
        options['job_id'] = job.get('job_id')
        if not job.get('diag_type'):
            options['diag_type'] = 'amwg'
        print_message('job options: {}'.format(options), 'ok')
    except Exception as e:
        print_debug(e)
        return -1, options['job_id']
    run_type = job.get('run_type')
    if not run_type:
        print_message("No run type in job request")
        return -1, None
    if run_type == 'diagnostic':
        try:
            sets = json.loads(job.get('diag_set'))
        except Exception as e:
            print_message('Unable to unpack diag_set')
            sets = '5'
        options['set'] = sets
        options['model_path'] = job.get('model_path')
        options['obs_path'] = job.get('obs_path')
        options['output_dir'] = job.get('output_dir')
        print_message('Got a new job with parameters:\n{}'.format(options), 'ok')
        handler = StartDiagHandler(options)
    elif run_type == 'model':
        handler = StartModelHandler(options)
    elif run_type == 'update':
        handler = UpdateJobHandler(options)
    elif run_type == 'upload_to_viewer':
        options['server'] = job.get('request_attr').get('server')
        options['username'] = job.get('request_attr').get('username')
        options['password'] = job.get('request_attr').get('password')
        options['path'] = job.get('request_attr').get('path')
        handler = UploadOutputHandler(options)
    else:
        print_message("Unrecognized request: {}".format(run_type))
        return -1, None
    try:
        response = handler.handle()
    except Exception as e:
        print_message("Error in job handler with options \n {}".format(options))
        print_debug(e)
        return -1, None
    try:
        print_message('Sending message to frontend poller: {}'.format(response))
        handler.respond(response)
    except Exception as e:
        print_message("Error sending response to job \n {}".format(options))
        print_debug(e)
        return -1, None
    return 0, None
def regen_devices(self):
    cur_devices = self.get_hdd_device_file_names()
    row = 0
    for device_file_name in sorted(cur_devices):
        cur_devices.add(device_file_name)
        if self.devices and device_file_name in self.devices:
            # Old device: update
            device = self.devices[device_file_name]
            #device.value_label.setText(device.temp_str())
            device.value_label.setText(device.temp_str())
            continue
        else:
            # New device
            device = Device()
            print '***added %s' % device_file_name
            device.file_name = device_file_name
            #device.dev_label = QtGui.QLabel(self)
            #device.dev_label.setText(device.file_name)
            device.value_label = QtGui.QLabel(self)
            to_set = device.temp_str()
            print to_set
            device.value_label.setText(to_set)
            if True:
                self.layout.addWidget(device.value_label)
            elif True:
                self.layout.addWidget(device.dev_label, row, 0)
                self.layout.addWidget(device.value_label, row, 1)
            else:
                device.layout = QtGui.QHBoxLayout()
                device.layout.addWidget(device.dev_label)
                device.layout.addWidget(device.value_label)
                print 'setting up layout'
                device.widget = QtGui.QWidget(self)
                print 'setting up layout 2'
                device.widget.setLayout(self.layout)
                print 'setting up layout 3'
                # Does not like this
                self.layout.addWidget(device.widget)
                print 'setting up layout done'
            #self.devices.add(device)
            self.devices[device.file_name] = device
        row += 1
    # Get rid of removed HDDs
    old_devices = set(self.devices)
    print_debug('cur devices: %s' % cur_devices)
    print_debug('old devices: %s' % old_devices)
    removed_devices = old_devices - cur_devices
    print_debug('removed devices: %s' % removed_devices)
    for device_file_name in removed_devices:
        print '***removed %s' % device_file_name
        device = self.devices[device_file_name]
        if True:
            #self.layout.removeWidget(device.dev_label)
            self.layout.removeWidget(device.value_label)
            device.value_label.setParent(None)
        else:
            self.layout.removeWidget(device.layout)
        del self.devices[device_file_name]
def main():
    """Entry point for autospec."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-g", "--skip-git", action="store_false", dest="git",
                        default=True, help="Don't commit result to git")
    parser.add_argument("-n", "--name", action="store", dest="name", default="",
                        help="Override the package name")
    parser.add_argument("-v", "--version", action="store", dest="version", default="",
                        help="Override the package version")
    parser.add_argument("url", default="", nargs="?",
                        help="tarball URL (e.g."
                        " http://example.com/downloads/mytar.tar.gz)")
    parser.add_argument("-a", "--archives", action="store", dest="archives",
                        default=[], nargs="*",
                        help="tarball URLs for additional source archives and "
                        " a location for the sources to be extracted to (e.g. "
                        " http://example.com/downloads/dependency.tar.gz "
                        " /directory/relative/to/extract/root )")
    parser.add_argument("-l", "--license-only", action="store_true", dest="license_only",
                        default=False, help="Only scan for license files")
    parser.add_argument("-b", "--skip-bump", dest="bump", action="store_false",
                        default=True, help="Don't bump release number")
    parser.add_argument("-c", "--config", dest="config", action="store",
                        default="/usr/share/defaults/autospec/autospec.conf",
                        help="Set configuration file to use")
    parser.add_argument("-t", "--target", dest="target", action="store", required=True,
                        help="Target location to create or reuse")
    parser.add_argument("-i", "--integrity", action="store_true", default=False,
                        help="Search for package signature from source URL and "
                        "attempt to verify package")
    parser.add_argument("-p", "--prep-only", action="store_true", default=False,
                        help="Only perform preparatory work on package")
    parser.add_argument("--non_interactive", action="store_true", default=False,
                        help="Disable interactive mode for package verification")
    parser.add_argument("-C", "--cleanup", dest="cleanup", action="store_true",
                        default=False, help="Clean up mock chroot after building the package")
    parser.add_argument("-m", "--mock-config", action="store", default="clear",
                        help="Value to pass with Mock's -r option. Defaults to "
                        '"clear", meaning that Mock will use '
                        "/etc/mock/clear.cfg.")
    parser.add_argument("-o", "--mock-opts", action="store", default="",
                        help="Arbitrary options to pass down to mock when "
                        "building a package.")
    parser.add_argument("-dg", "--download_from_git", action="store",
                        dest="download_from_git", default=None,
                        help="Download source from git")
    parser.add_argument("-rdg", "--redownload_from_git", action="store_true",
                        dest="redownload_from_git", default=False,
                        help="Redownload source from git")
    parser.add_argument("-fb", "--from_branch", action="store", dest="branch",
                        default=None, help="Define the git branch to download the source from")
    parser.add_argument("-ag", "--archives_from_git", action="store",
                        dest="archives_from_git", default=[], nargs="*",
                        help="git URL for additional archives, the location for"
                        " the sources to be extracted to and the branch to download"
                        " from, with master as the default (e.g."
                        " http://example.com/downloads/dependency.tar.gz"
                        " /directory/relative/to/extract/root master "
                        " Disable download submodule from git [BOOLEAN] "
                        " Force full clone from git [BOOLEAN] )")
    parser.add_argument("-rag", "--redownload_archive", action="store_true",
                        dest="redownload_archive", default=False,
                        help="Redownload archives")
    parser.add_argument("-dsub", "--disable_submodule", action="store",
                        dest="force_module", default=None,
                        help="Disable download submodules from git")
    parser.add_argument("-ffc", "--force_fullclone", action="store",
                        dest="force_fullclone", default=None,
                        help="Force full clone from git")
    parser.add_argument("-dfr", "--do_file_restart", action="store_false",
                        dest="do_file_restart", default=True,
                        help="Disable file_restart mechanism")
    parser.add_argument("-dbg", "--debug", action="store_true", dest="debug",
                        default=False, help="Enable debugging")
    args = parser.parse_args()

    (a_name, a_url, a_download_from_git, a_branch, a_force_module,
     a_force_fullclone, a_archives, a_archives_from_git) = read_old_metadata()
    name = args.name or a_name
    url = args.url or a_url
    archives = args.archives or a_archives
    archives_from_git = args.archives_from_git or a_archives_from_git
    util.debugging = args.debug
    args.integrity = False
    if not os.path.exists(f"{name}.license"):
        write_out(f"{name}.license", "GPL-2.0\n")
        print("Created default mock license file")

    mock_dir_pattern = re.compile(r"(?:\-\-config-opts=basedir=)([a-zA-Z0-9\.\_\+\-\/]*)")
    short_circuit_pattern = re.compile(r"(?:\-\-short-circuit=)([a-zA-Z-]+)")
    if util.debugging:
        print_debug(f"args.mock_config: {args.mock_config}")
        print_debug(f"args.mock_opts: {args.mock_opts}")
    mock_dir = ""
    short_circuit = ""
    mock_dir_match = mock_dir_pattern.search(args.mock_opts)
    if mock_dir_match:
        mock_dir = mock_dir_match.group(1)
        if util.debugging:
            print_debug(f"mock_dir: {mock_dir}")
    else:
        mock_dir = "/var/lib/mock"
        if util.debugging:
            print_debug(f"mock_dir: {mock_dir}")
    short_circuit_match = short_circuit_pattern.search(args.mock_opts)
    if short_circuit_match:
        short_circuit = short_circuit_match.group(1)
        print_info(f"short_circuit: {short_circuit}")
    else:
        short_circuit = None
    if short_circuit == "prep" or short_circuit is None:
        args.bump = True
    else:
        args.bump = False

    if util.debugging:
        print_debug("a_download_from_git: {}".format(str(str_to_bool(a_download_from_git))))
    if args.download_from_git is not None:
        download_from_git = str_to_bool(args.download_from_git)
        if util.debugging:
            print_debug("args.download_from_git: {}".format(str(str_to_bool(args.download_from_git))))
            print_debug("download_from_git: {}".format(str(download_from_git)))
    else:
        download_from_git = str_to_bool(a_download_from_git)
        if util.debugging:
            print_debug("args.download_from_git: {}".format(str(str_to_bool(args.download_from_git))))
            print_debug("download_from_git: {}".format(str(download_from_git)))

    if util.debugging:
        print_debug("a_force_module: {}".format(str(str_to_bool(a_force_module))))
    if args.force_module is not None:
        force_module = str_to_bool(args.force_module)
        if util.debugging:
            print_debug("args.force_module: {}".format(str(str_to_bool(args.force_module))))
            print_debug("force_module: {}".format(str(force_module)))
    else:
        force_module = str_to_bool(a_force_module)
        if util.debugging:
            print_debug("args.force_module: {}".format(str(str_to_bool(args.force_module))))
            print_debug("force_module: {}".format(str(force_module)))

    if util.debugging:
        print_debug("a_force_fullclone: {}".format(str(str_to_bool(a_force_fullclone))))
    if args.force_fullclone is not None:
        force_fullclone = str_to_bool(args.force_fullclone)
        if util.debugging:
            print_debug("args.force_fullclone: {}".format(str(str_to_bool(args.force_fullclone))))
            print_debug("force_fullclone: {}".format(str(force_fullclone)))
    else:
        force_fullclone = str_to_bool(a_force_fullclone)
        if util.debugging:
            print_debug("args.force_fullclone: {}".format(str(str_to_bool(args.force_fullclone))))
            print_debug("force_fullclone: {}".format(str(force_fullclone)))

    do_file_restart = args.do_file_restart
    redownload_from_git = args.redownload_from_git
    redownload_archive = args.redownload_archive

    if download_from_git:
        if util.debugging:
            print_debug("a_branch: {}".format(str(a_branch)))
        if args.branch is None and a_branch:
            branch = str(a_branch)
            if util.debugging:
                print_debug("args.branch: {}".format(str(args.branch)))
                print_debug("branch: {}".format(str(branch)))
        elif args.branch is None and not a_branch:
            branch = str("master")
            if util.debugging:
                print_debug("args.branch: {}".format(str(args.branch)))
                print_debug("branch: {}".format(str(branch)))
        elif args.branch is not None:
            branch = str(args.branch)
            if util.debugging:
                print_debug("args.branch: {}".format(str(args.branch)))
                print_debug("branch: {}".format(str(branch)))
    else:
        branch = None
        if util.debugging:
            print_debug("args.url: {}".format(args.url))
            print_debug("url: {}".format(url))
            print_debug("redownload_from_git: {}".format(str(redownload_from_git)))
            print_debug("redownload_archive: {}".format(str(redownload_archive)))

    if archives:
        if util.debugging:
            print_debug("a_archives: {}".format(list(a_archives)))
        if args.archives is None and a_archives:
            archives = list(a_archives)
            if util.debugging:
                print_debug("args.archives 1: {}".format(list(args.archives)))
                print_debug("archives 1: {}".format(list(archives)))
        elif args.archives is None and not a_archives:
            archives = None
            if util.debugging:
                print_debug("args.archives 2: {}".format(list(args.archives)))
                print_debug("archives 2: {}".format(list(archives)))
        elif args.archives is not None:
            archives = list(args.archives)
            if util.debugging:
                print_debug("args.archives 3: {}".format(str(args.archives)))
                print_debug("archives 3: {}".format(str(archives)))
    else:
        archives = []

    if archives_from_git:
        if util.debugging:
            print_debug("a_archives_from_git: {}".format(list(a_archives_from_git)))
        if args.archives_from_git is None and a_archives_from_git:
            archives_from_git = list(a_archives_from_git)
            if util.debugging:
                print_debug("args.archives_from_git 1: {}".format(list(args.archives_from_git)))
                print_debug("archives_from_git 1: {}".format(list(archives_from_git)))
        elif args.archives_from_git is None and not a_archives_from_git:
            archives_from_git = None
            if util.debugging:
                print_debug("args.archives_from_git 2: {}".format(list(args.archives_from_git)))
                print_debug("archives_from_git 2: {}".format(list(archives_from_git)))
        elif args.archives_from_git is not None:
            archives_from_git = list(args.archives_from_git)
            if util.debugging:
                print_debug("args.archives_from_git 3: {}".format(str(args.archives_from_git)))
                print_debug("archives_from_git 3: {}".format(str(archives_from_git)))
    else:
        archives_from_git = []

    if not args.target:
        parser.error(argparse.ArgumentTypeError("The target option is not valid"))
    else:
        # The target path must exist or be created
        os.makedirs(args.target, exist_ok=True)
    if not url:
        parser.error(argparse.ArgumentTypeError(
            "the url argument or options.conf['package']['url'] is required"))
    if archives:
        if len(archives) % 2 != 0:
            parser.error(argparse.ArgumentTypeError(
                "-a/--archives or options.conf['package']['archives'] requires an "
                "even number of arguments"))
    if archives_from_git:
        if len(archives_from_git) % 3 != 0 and len(archives_from_git) % 5 != 0:
            parser.error(argparse.ArgumentTypeError(
                "-ag/--archives_from_git or options.conf['package']['archives_from_git'] requires "
                "3 or 5 arguments"))

    if args.prep_only:
        os.makedirs("workingdir", exist_ok=True)
        package(
            args, url, name, archives, archives_from_git, "./workingdir",
            download_from_git, branch, redownload_from_git, redownload_archive,
            force_module, force_fullclone, mock_dir, short_circuit, do_file_restart,
        )
    else:
        with tempfile.TemporaryDirectory() as workingdir:
            package(
                args, url, name, archives, archives_from_git, workingdir,
                download_from_git, branch, redownload_from_git, redownload_archive,
                force_module, force_fullclone, mock_dir, short_circuit, do_file_restart,
            )
if __name__ == "__main__":
    while True:
        retval, id = poll()
        if retval == 0:
            continue
        elif retval == -2:
            print_message('No new jobs', 'ok')
            time.sleep(5)
            continue
        elif retval == -3:
            time.sleep(5)
            continue
        if retval:
            print_message('Job run error')
            # Send an error message to the frontend poller
            request = json.dumps({
                'job_id': id,
                'request': 'error',
            })
            url = 'http://' + FRONTEND_POLLER_HOST
            try:
                r = requests.post(url, request)
            except Exception as e:
                print_debug(e)
            continue
        time.sleep(5)
def cipher(b_in: bytearray, key_schedule: List[bytearray], nr: int,
           debug: bool = False) -> bytearray:
    if debug:
        print("CIPHER (ENCRYPT):")
        print_debug(debug_get_round(0) + "input", bytearray_to_str(b_in))
    state = b_in.copy()
    key = block_bytes(key_schedule, rotate=(not debug))
    if debug:
        print_debug(debug_get_round(0) + "k_sch", bytearray_to_str(key[0]))
    addRoundKey(state, key[0])
    for r in range(1, nr):
        if debug:
            print_debug(debug_get_round(r) + "start", bytearray_to_str(state))
        subBytes(state)
        if debug:
            print_debug(debug_get_round(r) + "s_box", bytearray_to_str(state))
        state = shiftRows(state, rotate=debug)
        if debug:
            print_debug(debug_get_round(r) + "s_row", bytearray_to_str(state))
        state = mixColumns(state, rotate=debug)
        if debug:
            print_debug(debug_get_round(r) + "m_col", bytearray_to_str(state))
            print_debug(debug_get_round(r) + "k_sch", bytearray_to_str(key[r]))
        addRoundKey(state, key[r])
    # Final round: no MixColumns
    if debug:
        print_debug(debug_get_round(nr) + "start", bytearray_to_str(state))
    subBytes(state)
    if debug:
        print_debug(debug_get_round(nr) + "s_box", bytearray_to_str(state))
    state = shiftRows(state, rotate=debug)
    if debug:
        print_debug(debug_get_round(nr) + "s_row", bytearray_to_str(state))
        print_debug(debug_get_round(nr) + "k_sch", bytearray_to_str(key[-1]))
    addRoundKey(state, key[-1])
    if debug:
        print_debug(debug_get_round(nr) + "output", bytearray_to_str(state))
        print()
    return state
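# A sketch of the addRoundKey() helper assumed above: AddRoundKey is a plain
# byte-wise XOR applied in place (the real helper's signature may differ).
def add_round_key(state: bytearray, round_key: bytearray) -> None:
    for i in range(len(state)):
        state[i] ^= round_key[i]

s = bytearray(range(16))
add_round_key(s, bytearray(16))   # XOR with an all-zero key is the identity
assert s == bytearray(range(16))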
def __init__(self, rules):
    self.rules = defaultdict(list)
    nonfinals = set()
    finals = set()
    for rule in rules:
        self.rules[rule.left].append((rule.right, rule))
        nonfinals.add(rule.left)
        for item in rule.right:
            if GrammarDriver.__isToken(item):
                finals.add(item)
    self.nonfinals = list(nonfinals)
    self.finals = list(finals)
    # print("nonfinals" + str(self.nonfinals))
    # print("finals" + str(self.finals))
    self.canBeEpsilon = defaultdict(bool)
    for nf in self.nonfinals:
        if self.__hasEpsilonRule(nf):
            self.canBeEpsilon[nf] = True
    self.canBeEpsilon["epsilon"] = True
    self.first = defaultdict(set)
    self.follow = defaultdict(set)
    for f in self.finals:
        self.first[f].add(f)
    # Compute FIRST and EPS
    didSomething = True
    while didSomething:
        didSomething = False
        for item in self.rules:
            productions = self.rules[item]
            for (production, rule) in productions:
                # print("handling " + str(item) + " -> " + str(production))
                if production[0] == "epsilon":
                    continue
                # A -> B C, first(B) subset_of first(A)
                if GrammarDriver.__addSetToSet(self.first[item], self.first[production[0]]):
                    # print("A -> B C, first(B) subset_of first(A)")
                    # print("first of " + str(item) + " += " + str(self.first[production[0]]))
                    didSomething = True
                # A -> B1 B2 .. Bn C, if all B's can be epsilon, first(C) subset_of first(A)
                firstDefNonEpsilon = None
                for rhsItem in production:
                    if not self.canBeEpsilon[rhsItem]:
                        firstDefNonEpsilon = rhsItem
                        break
                if firstDefNonEpsilon:
                    if GrammarDriver.__addSetToSet(self.first[item], self.first[firstDefNonEpsilon]):
                        didSomething = True
                else:
                    if not self.canBeEpsilon[item]:
                        self.canBeEpsilon[item] = True
                        didSomething = True
    # Compute FOLLOW
    didSomething = True
    while didSomething:
        didSomething = False
        for item in self.rules:
            productions = self.rules[item]
            for (production, rule) in productions:
                #print("Processing: " + str(item) + " -> " + str(production))
                # A -> B C D, first(C) subset_of follow(B), and first(D) subset_of follow(C)
                for i in range(len(production) - 1):
                    whatToAdd = i + 1
                    while whatToAdd < len(production):
                        if GrammarDriver.__addSetToSet(self.follow[production[i]], self.first[production[whatToAdd]]):
                            didSomething = True
                        # If C can be epsilon, then first(D) subset_of follow(B)
                        if self.canBeEpsilon[production[whatToAdd]]:
                            whatToAdd += 1
                        else:
                            break
                # A -> B C D, follow(A) subset_of follow(D)
                for i in reversed(range(len(production))):
                    if GrammarDriver.__addSetToSet(self.follow[production[i]], self.follow[item]):
                        didSomething = True
                    # If D can be epsilon, then follow(A) subset_of follow(C)
                    if not self.canBeEpsilon[production[i]]:
                        break
    # Compute PREDICT
    self.predictions = defaultdict(defaultdict)
    self.success = True
    for item in self.rules:
        productions = self.rules[item]
        for (production, rule) in productions:
            predictSet = set()
            # print_debug("Processing: " + str(item) + " -> " + str(production))
            # A -> B C, predict this rule if the symbol is in first(B), or if B can
            # be epsilon and the symbol is in first(C), or if all can be epsilon
            # and the symbol is in follow(A).
            for i in range(len(production)):
                # print_debug("Case 1: adding to predictSet: " + str(self.first[production[i]]))
                GrammarDriver.__addSetToSet(predictSet, self.first[production[i]])
                if not self.canBeEpsilon[production[i]]:
                    break
                if i < len(production) - 1:
                    # print_debug("Case 2: adding to predictSet: " + str(self.first[production[i]]))
                    GrammarDriver.__addSetToSet(predictSet, self.first[production[i + 1]])
                else:
                    # All can be epsilon
                    # print_debug("Case 3: adding to predictSet: " + str(self.first[production[i]]))
                    GrammarDriver.__addSetToSet(predictSet, self.follow[item])
            for f in predictSet:
                if f in self.predictions[item]:
                    print_debug("Going to fail; rule " + str(item) + " -> " + str(production))
                    print_debug("Already in predictions[" + str(item) + "]: " + str(f))
                    self.success = False
                    assert False
                # print_debug("Adding: predictions[" + str(item) + "][" + str(f) + "]")
                self.predictions[item][f] = (production, rule)
    # print_debug("first:")
    # for item in self.first:
    #     print_debug(str(item) + " -> " + str(self.first[item]))
    # print_debug("follow:")
    # for item in self.follow:
    #     print_debug(str(item) + " -> " + str(self.follow[item]))
    # print_debug("predict:")
    # for item in self.predictions:
    #     for f in self.predictions[item]:
    #         print_debug("(" + str(item) + ", " + str(f) + ") -> " + str(self.predictions[item][f]))
    assert self.success
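# The FIRST computation above is a fixpoint iteration. A toy standalone run,
# for the grammar S -> A b, A -> a | epsilon (lowercase = terminals):
from collections import defaultdict

rules = {"S": [["A", "b"]], "A": [["a"], ["epsilon"]]}
first = defaultdict(set, {"a": {"a"}, "b": {"b"}})
can_be_epsilon = {"A": True, "epsilon": True}
changed = True
while changed:
    changed = False
    for lhs, prods in rules.items():
        for prod in prods:
            for sym in prod:
                if sym == "epsilon":
                    break
                if not first[sym] <= first[lhs]:
                    first[lhs] |= first[sym]
                    changed = True
                if not can_be_epsilon.get(sym, False):
                    break
print(sorted(first["S"]))   # ['a', 'b'] -- 'b' enters FIRST(S) because A can vanish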
def runSimulation(self, scenarioObj):
    now = datetime.now()
    date_time = now.strftime("%m-%d-%Y-%H-%M-%S")
    util.print_debug("\n === Run simulation === [" + date_time + "]")
    sim = self.sim
    npcList = self.npcList
    ego = self.ego
    init_degree = ego.state.rotation.y
    numOfTimeSlice = len(scenarioObj[0])
    numOfNpc = len(scenarioObj)
    # 1st dim: NPC; 2nd dim: time slice
    deltaDList = [[self.maxint for i in range(numOfTimeSlice)] for j in range(numOfNpc)]
    dList = [[self.maxint for i in range(numOfTimeSlice)] for j in range(numOfNpc)]
    spawns = sim.get_spawn()

    # Add NPCs: hard-coded for now; the number of NPCs needs to be consistent
    ################################################################
    self.addNpcVehicle(lgsvl.Vector(1610.6, 88.38, -620.9))
    self.addNpcVehicle(lgsvl.Vector(1640.6, 88.38, -608.9))
    ################################################################

    for npc in npcList:
        npc.follow_closest_lane(True, random.randint(1, 9))

    self.isEgoFault = False
    self.isHit = False

    def on_collision(agent1, agent2, contact):
        #util.print_debug(" --- On Collision, ego speed: " + str(agent1.state.speed) + ", NPC speed: " + str(agent2.state.speed))
        if self.isHit:
            return
        self.isHit = True
        if agent2 is None or agent1 is None:
            self.isEgoFault = True
            util.print_debug(" --- Hit road obstacle --- ")
            return
        apollo = agent1
        npcVehicle = agent2
        if agent2.name == "XE_Rigged-apollo_3_5":
            apollo = agent2
            npcVehicle = agent1
        util.print_debug(" --- On Collision, ego speed: " + str(apollo.state.speed) + ", NPC speed: " + str(npcVehicle.state.speed))
        if apollo.state.speed <= 0.005:
            self.isEgoFault = False
            return
        self.isEgoFault = liability.isEgoFault(apollo, npcVehicle, sim, init_degree)
        # Compute deltaD when the ego is at fault
        if self.isEgoFault:
            self.egoFaultDeltaD = self.findCollisionDeltaD(apollo, npcVehicle)
            util.print_debug(" >>>>>>> Ego fault delta D is " + str(self.egoFaultDeltaD))

    ego.on_collision(on_collision)

    # Frequency of action change of NPCs
    totalSimTime = self.totalSimTime
    actionChangeFreq = totalSimTime / numOfTimeSlice
    hitTime = numOfNpc

    for t in range(0, int(numOfTimeSlice)):
        # For every NPC
        i = 0
        for npc in npcList:
            self.setNpcSpeed(npc, scenarioObj[i][t][0])
            turnCommand = scenarioObj[i][t][1]
            # <0: no turn; 1: left; 2: right>
            if turnCommand == 1:
                direction = "LEFT"
                self.setNpcChangeLane(npc, direction)
            elif turnCommand == 2:
                direction = "RIGHT"
                self.setNpcChangeLane(npc, direction)
            i += 1

        # Stop if there is an accident
        if self.isEgoFault or liability.isHitEdge(ego, sim, init_degree):
            self.isHit = True
            self.isEgoFault = True
        if self.isHit:
            hitTime = t
            break

        # Record the min delta D and d
        minDeltaD = self.maxint
        npcDeltaAtTList = [0 for i in range(numOfNpc)]
        minD = self.maxint
        npcDAtTList = [0 for i in range(numOfNpc)]
        for j in range(0, int(actionChangeFreq) * 4):
            k = 0  # kth NPC
            for npc in npcList:
                # Update delta D
                curDeltaD = self.findDeltaD(ego, npc)
                if minDeltaD > curDeltaD:
                    minDeltaD = curDeltaD
                npcDeltaAtTList[k] = minDeltaD
                # Update d
                curD = liability.findDistance(ego, npc)
                if minD > curD:
                    minD = curD
                npcDAtTList[k] = minD
                #util.print_debug(" --- current d is " + str(liability.findDistance(ego, npc)))
                k += 1
            # Check if the bridge is disconnected or the log's last line reports a failure
            if self.isHit:
                time.sleep(10)
            fbr = open(self.bridgeLogPath, 'r')
            fbrLines = fbr.readlines()
            for line in fbrLines:
                pass  # After this loop, line holds the last line of the log
            while not ego.bridge_connected or "fail" in line or "Fail" in line or "overflow" in line:
                time.sleep(5)
                resultDic = {}
                resultDic['fitness'] = ''
                resultDic['fault'] = ''
                util.print_debug(" ---- Bridge is cut off ----")
                return resultDic
            sim.run(0.25)
        ####################################
        k = 0  # kth NPC
        for npc in npcList:
            deltaDList[k][t] = npcDeltaAtTList[k]
            dList[k][t] = npcDAtTList[k]
            k += 1

    # Process deltaDList and dList to compute the fitness score
    # Make sure it is not 0: cannot divide by 0 in the GA
    fitness_score = self.findFitness(deltaDList, dList, self.isEgoFault, self.isHit, hitTime)
    resultDic = {}
    resultDic['fitness'] = (fitness_score + self.maxint) / float(len(npcList) - 1)  # Try to make sure it is positive
    resultDic['fault'] = ''
    if self.isEgoFault:
        resultDic['fault'] = 'ego'
    util.print_debug(" === Finish simulation === ")
    util.print_debug(resultDic)
    return resultDic
def inv_cipher(b_in: bytearray, key_schedule: List[bytearray], nr: int,
               debug: bool = False) -> bytearray:
    if debug:
        print("INVERSE CIPHER (DECRYPT):")
        print_debug(debug_get_round(0) + "iinput", bytearray_to_str(b_in))
    state = b_in.copy()
    key = block_bytes(key_schedule, rotate=(not debug))
    if debug:
        print_debug(debug_get_round(0) + "ik_sch", bytearray_to_str(key[-1]))
    addRoundKey(state, key[-1])
    for r in range(nr - 1, 0, -1):
        if debug:
            print_debug(debug_get_round(nr - r) + "istart", bytearray_to_str(state))
        state = invShiftRows(state, rotate=debug)
        if debug:
            print_debug(debug_get_round(nr - r) + "is_row", bytearray_to_str(state))
        invSubBytes(state)
        if debug:
            print_debug(debug_get_round(nr - r) + "is_box", bytearray_to_str(state))
            print_debug(debug_get_round(nr - r) + "ik_sch", bytearray_to_str(key[r]))
        addRoundKey(state, key[r])
        if debug:
            print_debug(debug_get_round(nr - r) + "ik_add", bytearray_to_str(state))
        state = invMixColumns(state, rotate=debug)
    # Final round: no InvMixColumns
    if debug:
        print_debug(debug_get_round(nr) + "istart", bytearray_to_str(state))
    state = invShiftRows(state, rotate=debug)
    if debug:
        print_debug(debug_get_round(nr) + "is_row", bytearray_to_str(state))
    invSubBytes(state)
    if debug:
        print_debug(debug_get_round(nr) + "is_box", bytearray_to_str(state))
        print_debug(debug_get_round(nr) + "ik_sch", bytearray_to_str(key[0]))
    addRoundKey(state, key[0])
    if debug:
        print_debug(debug_get_round(nr) + "ioutput", bytearray_to_str(state))
    return state
def finishup(config, job_sets, state_path, event_list, status, display_event, thread_list, kill_event):
    message = 'Performing post run cleanup'
    event_list.push(message=message)
    # Note: the original condition was inverted; skip removal only when
    # no_cleanup is requested
    if config.get('global').get('no_cleanup', False):
        print 'Not cleaning up temp directories'
    else:
        tmp = os.path.join(config['global']['output_path'], 'tmp')
        if os.path.exists(tmp):
            rmtree(tmp)
    message = 'All processing complete' if status == 1 else "One or more job failed"
    emailaddr = config.get('global').get('email')
    if emailaddr:
        event_list.push(message='Sending notification email to {}'.format(emailaddr))
        try:
            if status == 1:
                msg = 'Post processing for {exp} has completed successfully\n'.format(
                    exp=config['global']['experiment'])
            else:
                msg = 'One or more job(s) for {exp} failed\n\n'.format(
                    exp=config['global']['experiment'])
            for job_set in job_sets:
                msg += '\nYearSet {start}-{end}: {status}\n'.format(
                    start=job_set.set_start_year,
                    end=job_set.set_end_year,
                    status=job_set.status)
                for job in job_set.jobs:
                    if job.status == JobStatus.COMPLETED:
                        if job.config.get('host_url'):
                            msg += ' > {job} - COMPLETED :: output hosted :: {url}\n'.format(
                                url=job.config['host_url'], job=job.type)
                        else:
                            msg += ' > {job} - COMPLETED :: output located :: {output}\n'.format(
                                output=job.output_path, job=job.type)
                    elif job.status in [JobStatus.FAILED, JobStatus.CANCELLED]:
                        output_path = os.path.join(
                            job.config['run_scripts_path'],
                            '{job}_{start:04d}_{end:04d}.out'.format(
                                job=job.type, start=job.start_year, end=job.end_year))
                        msg += ' > {job} - {status} :: console output :: {output}\n'.format(
                            output=output_path, job=job.type, status=job.status)
                    else:
                        msg += ' > {job} - {state}\n'.format(job=job.type, state=job.status)
                msg += '\n\n'
            m = Mailer(src='*****@*****.**', dst=emailaddr)
            m.send(status=message, msg=msg)
        except Exception as e:
            print_debug(e)
    event_list.push(message=message)
    display_event.set()
    print_type = 'ok' if status == 1 else 'error'
    print_message(message, print_type)
    logging.info("All processes complete")
    for t in thread_list:
        kill_event.set()
        t.join(timeout=1.0)
    time.sleep(2)
def git_archive_all(path, name, url, branch, force_module, force_fullclone, conf, is_fatal=True):
    """Clone package directly from a git repository."""
    cmd_args = f"{branch} {url} {name}"
    clone_path = f"{path}{name}"
    if util.debugging:
        print_debug(f"path: {path}")
        print_debug(f"force_module {str(force_module)}")
        print_debug(f"force_fullclone {str(force_fullclone)}")
    is_url = validators.url(url)
    if is_url is True:
        if "pypi.org/project/" in url:
            latest_pypi_source = latest_pypi(url, output_format="source", pre_ok=True)
            print_info(f"pypi.org/project/: {latest_pypi_source}")
            latest_pypi_source_basename = os.path.basename(latest_pypi_source)
            download.do_curl(latest_pypi_source, dest=f"./{latest_pypi_source_basename}", is_fatal=True)
            absolute_url_file = f"file://{os.path.abspath(latest_pypi_source_basename)}"
            return absolute_url_file
        else:
            git_clone(url=url, path=path, cmd_args=cmd_args, clone_path=clone_path,
                      force_module=force_module, force_fullclone=force_fullclone,
                      is_fatal=is_fatal)
            try:
                outputVersion = find_version_git(url=url, clone_path=clone_path, path=path, conf=conf)
            except:
                if is_fatal:
                    remove_clone_archive(path, clone_path, is_fatal)
                    print_fatal(f"Unexpected error: {sys.exc_info()[0]}")
                    sys.exit(1)
            if not outputVersion.startswith("v") and not outputVersion.startswith("V"):
                outputVersion = f"v{outputVersion}"
            clone_file = f"{name}-{outputVersion}.tar.gz"
            absolute_file_path = os.path.abspath(clone_file)
            absolute_url_file = f"file://{absolute_file_path}"
            if util.debugging:
                print_debug(f"{clone_file}")
                print_debug(f"clone_path: {clone_path}")
                print_debug(f"absolute_file_path: {absolute_file_path}")
                print_debug(f"absolute_url_file: {absolute_url_file}")
            try:
                process = subprocess.run(
                    f"tar --create --file=- {clone_path}/ | pigz -9 -p 20 > {clone_file}",
                    check=True,
                    shell=True,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.STDOUT,
                    text=True,
                    universal_newlines=True,
                    cwd=path,
                )
            except subprocess.CalledProcessError as err:
                remove_clone_archive(path, clone_path, is_fatal)
                print_fatal(f"Unable to archive {clone_path} in {clone_file} from {url}: {err}")
                sys.exit(1)
            remove_clone_archive(path, clone_path, is_fatal)
            return absolute_url_file
    else:
        if os.path.isdir(url):
            clone_path = url
            outputVersion = find_version_git(url=url, clone_path=clone_path, path=path, conf=conf)
            if not outputVersion.startswith("v") and not outputVersion.startswith("V"):
                outputVersion = f"v{outputVersion}"
            clone_file = f"{name}-{outputVersion}.tar.gz"
            clone_path_norm = os.path.normpath(clone_path)
            absolute_file_path = os.path.abspath(clone_file)
            absolute_url_file = f"file://{absolute_file_path}"
            if util.debugging:
                print_debug(f"{clone_file}")
                print_debug(f"clone_path: {clone_path}")
                print_debug(f"absolute_file_path: {absolute_file_path}")
                print_debug(f"absolute_url_file: {absolute_url_file}")
            try:
                process = subprocess.run(
                    f"tar --create --exclude=.github --exclude=.git --file=- {os.path.basename(clone_path_norm)}/ | pigz -9 -p 20 > {absolute_file_path}",
                    check=True,
                    shell=True,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.STDOUT,
                    text=True,
                    universal_newlines=True,
                    cwd=os.path.dirname(clone_path_norm),
                )
            except subprocess.CalledProcessError as err:
                if is_fatal:
                    remove_clone_archive(path, clone_path, is_fatal)
                    print_fatal(f"Unable to archive {clone_path} in {clone_file} from {url}: {err}")
                    sys.exit(1)
            return absolute_url_file
        else:
            # clone_file is not defined on this path, so do not reference it here
            print_fatal(f"Unable to archive {clone_path} from {url}")
            sys.exit(1)
def get_temperature_by_smartctl(device):
    '''
    [root@gespenst ~]# smartctl --all /dev/sdb
    ...
    SMART Attributes Data Structure revision number: 16
    Vendor Specific SMART Attributes with Thresholds:
    ID# ATTRIBUTE_NAME          FLAG     VALUE WORST THRESH TYPE      UPDATED  WHEN_FAILED RAW_VALUE
      1 Raw_Read_Error_Rate     0x002f   200   200   051    Pre-fail  Always       -       0
      3 Spin_Up_Time            0x0027   188   180   021    Pre-fail  Always       -       1566
      4 Start_Stop_Count        0x0032   100   100   000    Old_age   Always       -       129
      5 Reallocated_Sector_Ct   0x0033   200   200   140    Pre-fail  Always       -       0
      7 Seek_Error_Rate         0x002e   100   253   000    Old_age   Always       -       0
      9 Power_On_Hours          0x0032   099   099   000    Old_age   Always       -       863
     10 Spin_Retry_Count        0x0032   100   100   051    Old_age   Always       -       0
     11 Calibration_Retry_Count 0x0032   100   100   000    Old_age   Always       -       0
     12 Power_Cycle_Count       0x0032   100   100   000    Old_age   Always       -       124
    192 Power-Off_Retract_Count 0x0032   200   200   000    Old_age   Always       -       103
    193 Load_Cycle_Count        0x0032   180   180   000    Old_age   Always       -       62042
    194 Temperature_Celsius     0x0022   118   097   000    Old_age   Always       -       29
    196 Reallocated_Event_Count 0x0032   200   200   000    Old_age   Always       -       0
    197 Current_Pending_Sector  0x0032   200   200   000    Old_age   Always       -       0
    198 Offline_Uncorrectable   0x0030   100   253   000    Old_age   Offline      -       0
    199 UDMA_CRC_Error_Count    0x0032   200   200   000    Old_age   Always       -       0
    200 Multi_Zone_Error_Rate   0x0008   100   253   051    Old_age   Offline      -       0
    ..
    '''
    command = "smartctl"
    args = list()
    args.append('-a')
    args.append(device)
    # go go go
    (rc, output) = Execute.with_output(command, args)
    '''
    [root@gespenst uvtemp]# smartctl /dev/sdf
    smartctl 5.40 2010-10-16 r3189 [i386-redhat-linux-gnu] (local build)
    Copyright (C) 2002-10 by Bruce Allen, http://smartmontools.sourceforge.net
    Smartctl open device: /dev/sdf failed: No such device
    [root@gespenst uvtemp]# echo $?
    2
    '''
    """
    Too many obscure conditions
    Try to parse and ignore if the parse fails
    [mcmaster@gespenst uvtemp]$ smartctl /dev/sda
    smartctl 5.40 2010-10-16 r3189 [i386-redhat-linux-gnu] (local build)
    Copyright (C) 2002-10 by Bruce Allen, http://smartmontools.sourceforge.net
    Smartctl open device: /dev/sda failed: Permission denied

    rc_adj = rc / 256
    if rc_adj == 4:
        '''
        ...
        smartctl 5.40 2010-10-16 r3189 [i386-redhat-linux-gnu] (local build)
        Copyright (C) 2002-10 by Bruce Allen, http://smartmontools.sourceforge.net

        === START OF INFORMATION SECTION ===
        Model Family:     Indilinx Barefoot based SSDs
        Device Model:     OCZ-AGILITY
        ...
        Warning: device does not support Error Logging
        Warning! SMART ATA Error Log Structure error: invalid SMART checksum.
        SMART Error Log Version: 1
        No Errors Logged

        Warning! SMART Self-Test Log Structure error: invalid SMART checksum.
        SMART Self-test log structure revision number 1
        No self-tests have been logged.  [To run self-tests, use: smartctl -t]
        Device does not support Selective Self Tests/Logging

        Still had table info though, but not temp
        '''
        return None
    elif not rc == 0:
        print output
        # This happens for a number of reasons, hard to gauge
        print 'Bad rc: %d (%d)' % (rc_adj, rc)
        return None
    """
    if output is None:
        return None
    print_debug()
    print_debug()
    print_debug(output)
    print_debug()
    print_debug()
    # 194 Temperature_Celsius     0x0022   117   097   000    Old_age   Always       -       30
    re_res = re.search(".*Temperature_Celsius.*", output)
    if re_res is None:
        return None
    line = re_res.group()
    if line is None:
        return None
    print_debug('line: %s' % repr(line))
    worst_temp = float(line.split()[4])
    print_debug('worst: %s' % worst_temp)
    cur_temp = float(line.split()[9])
    print_debug('cur: %s' % cur_temp)
    return (cur_temp, worst_temp)
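# The parse above boils down to one regex plus whitespace splitting; on the
# sample attribute line, field 4 is WORST and field 9 is the raw (current) value:
import re

sample = ("194 Temperature_Celsius     0x0022   117   097   000    "
          "Old_age   Always       -       30")
line = re.search(".*Temperature_Celsius.*", sample).group()
print(float(line.split()[4]), float(line.split()[9]))   # 97.0 30.0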
def update(self):
    print_debug()
    print_debug('update')
    self.regen_devices()
def get_tokens(self):
    '''
    Returns a list of (k, v) pairs
    If it has no v, v will be None

    Tokens can have quotes around them
    Ex: n"TIFF c:NONE r:CROP"
    Internally, we do not store these
    Instead, they will be re-added when writing
    '''
    tokens = list()
    i = 0
    while i < len(self.text):
        k = ''
        v = None
        # Find the key: keep going until we hit either ", number, or space
        while i < len(self.text):
            c = self.text[i]
            # End of this k/v?
            if c == ' ':
                i += 1
                break
            # A quoted value?
            elif c == '"':
                i += 1
                v = ''
                # Note we skip the "
                while True:
                    if i >= len(self.text):
                        raise Exception('Missing closing " on %s' % self.text)
                    c = self.text[i]
                    if c == '"':
                        i += 1
                        break
                    v += c
                    i += 1
                # Think we should have at most one quoted thingy
                break
            # A numeric value?
            elif c in '+-0123456789':
                v = ''
                # Note we include the original char
                while i < len(self.text):
                    c = self.text[i]
                    if c == ' ':
                        i += 1
                        break
                    v += c
                    i += 1
                break
            else:
                k += c
                i += 1
        # Discard extra spaces and some other corner cases
        if len(k) > 0:
            tokens.append((k, v))
    print_debug(tokens)
    return tokens
    util.print_debug(resultDic)
    return resultDic

##################################### MAIN ###################################
# Read scenario obj
objPath = sys.argv[1]
resPath = sys.argv[2]

objF = open(objPath, 'rb')
scenarioObj = pickle.load(objF)
objF.close()

resultDic = {}
try:
    sim = LgApSimulation()
    resultDic = sim.runSimulation(scenarioObj)
except Exception as e:
    util.print_debug(str(e))  # e.message does not exist in Python 3
    resultDic['fitness'] = ''
    resultDic['fault'] = ''

# Send the fitness score object back to the GA process
if os.path.isfile(resPath):
    os.remove(resPath)
f_f = open(resPath, 'wb')
pickle.dump(resultDic, f_f)
f_f.truncate()
f_f.close()
def get_tokens(self):
    '''
    Returns a list of (k, v) pairs
    If it has no v, v will be None
    Tokens can have quotes around them
    Ex: n"TIFF c:NONE r:CROP"
    Internally, we do not store these
    Instead, they will be re-added when writing
    '''
    tokens = list()
    i = 0
    # Some versions have a0, some have a=0, although a0 seems much more common
    while i < len(self.text):
        k = ''
        v = None
        # Find the key: keep going until we hit either ", number, or space
        while i < len(self.text):
            c = self.text[i]
            # End of this k/v?
            if c == ' ':
                i += 1
                break
            # A quoted value?
            elif c == '"':
                i += 1
                v = ''
                # Note we skip the "
                while True:
                    if i >= len(self.text):
                        raise Exception('Missing closing " on %s' % self.text)
                    c = self.text[i]
                    if c == '"':
                        i += 1
                        break
                    v += c
                    i += 1
                # Assume there is at most one quoted value per token
                break
            # A numeric value?
            elif c in '+-0123456789':
                v = ''
                # Note we include the original char
                while i < len(self.text):
                    c = self.text[i]
                    if c == ' ':
                        i += 1
                        break
                    v += c
                    i += 1
                break
            else:
                # This may not be bulletproof, but it's good enough in practice
                # These lines show up when you add images in Hugin
                # e.g. bad: a=a, but it's not clear that's valid anyway
                if c != '=':
                    k += c
                i += 1
        # Discard extra spaces and some other corner cases
        if len(k) > 0:
            tokens.append((k, v))
    print_debug(tokens)
    return tokens
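# Hedged usage sketch (the stand-in class and sample line are illustrative,
# not from the original sources): exercise get_tokens() on a minimal object
# that only provides the .text attribute the tokenizer reads. For a Hugin
# .pto image line, quoted values keep their contents, numeric values follow
# their key, and bare flags get v = None.
class _FakeLine(object):
    def __init__(self, text):
        self.text = text

# Expected result, roughly:
#   [('i', None), ('w', '2816'), ('h', '2112'), ('f', '0'),
#    ('n', 'TIFF c:NONE r:CROP')]
print_debug(get_tokens(_FakeLine('i w2816 h2112 f0 n"TIFF c:NONE r:CROP"')))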
def ga(self):
    # Load from checkpoint if one was given
    if self.ck_path is not None:
        ck = open(self.ck_path, 'rb')
        self.pop = pickle.load(ck)
        ck.close()
    elif not self.isInLis:
        self.init_pop()

    best, bestIndex = self.find_best()
    self.g_best = copy.deepcopy(best)

    # Start evolution
    for i in range(self.max_gen):  # i th generation.
        util.print_debug(" \n\n*** " + str(i) + "th generation ***")
        # Make sure we clear touched_chs history book every gen
        self.touched_chs = []
        self.cross()
        self.mutation(i)
        self.select_roulette()

        best, bestIndex = self.find_best()  # Find the scenario with the best fitness score in current generation
        self.bests[i] = best                # Record the scenario with the best fitness score in i th generation

        ########### Update noprogressCounter #########
        noprogress = False
        ave = 0
        if i >= self.lastRestartGen + 5:
            for j in range(i - 5, i):
                ave += self.bests[j].y
            ave /= 5
            if ave >= best.y:
                self.lastRestartGen = i  # fixed: was a typo, lastRestarGen
                noprogress = True

        util.print_debug(" ###### Best score of the generation: " + str(best.y) + " ###### ")
        if self.g_best.y < best.y:  # Record the best fitness score across all generations
            self.g_best = copy.deepcopy(best)

        util.print_debug(" ###### Best score overall: " + str(self.g_best.y) + " ###### ")
        N_generation = self.pop
        N_b = self.g_best  # Record the scenario with the best score over all generations

        # Update the checkpoint of the best scenario so far
        self.take_checkpoint(N_b, 'best_scenario.obj')

        # Checkpoint this generation
        self.take_checkpoint(N_generation, 'last_gen.obj')

        # Checkpoint every generation
        now = datetime.now()
        date_time = now.strftime("%m-%d-%Y-%H-%M-%S")
        self.take_checkpoint(N_generation, 'generation-' + str(i) + '-at-' + date_time)

        #################### Start the Restart Process ###################
        if noprogress and not self.isInLis:
            util.print_debug(" ###### Restart Based on Generation: " + str(i) + " ###### ")
            oldCkName = 'GaCheckpoints'
            newPop = generateRestart.generateRestart(oldCkName, 1000, self.bounds)
            self.pop = copy.deepcopy(newPop)
            self.hasRestarted = True
            # Keep the index local so a later LIS replacement targets the right slot
            best, bestIndex = self.find_best()
            self.bestYAfterRestart = best.y
            self.lastRestartGen = i
        #################### End the Restart Process ###################

        simiSum = 0  # default, in case no checkpoints exist yet
        if os.path.exists("GaCheckpoints"):
            prePopPool = generateRestart.getAllCheckpoints('GaCheckpoints')
            for eachChs in self.pop:
                eachSimilarity = generateRestart.getSimularityOfScenarioVsPrevPop(eachChs, prePopPool)
                simiSum += eachSimilarity
            util.print_debug(" ==== Similarity compared with all prior generations: " + str(simiSum / float(self.pop_size)))

        # Log fitness etc
        f = open('Progress.log', 'a')
        f.write(str(i) + " " + str(best.y) + " " + str(self.g_best.y) + " " + str(simiSum / float(self.pop_size)) + " " + date_time + "\n")
        f.close()

        if best.y > self.bestYAfterRestart:
            self.bestYAfterRestart = best.y
            if i > (self.lastRestartGen + self.minLisGen) and not self.isInLis:  # Only allow one level of recursion
                ################## Start LIS #################
                util.print_debug(" \n\n === Start of Local Iterative Search === \n\n")
                # Increase mutation rate a little bit to jump out of local maxima
                lis = GeneticAlgorithm(self.bounds, (self.pm * 1.5), self.pc, self.pop_size, self.NPC_size, self.time_size, self.numOfGenInLis)
                lis.setLisPop(self.g_best)
                lis.setLisFlag()
                lisBestChs = lis.ga()
                util.print_debug(" --- Best fitness in LIS: " + str(lisBestChs.y))
                if lisBestChs.y > self.g_best.y:
                    # Replace the current best with the LIS result
                    self.pop[bestIndex] = copy.deepcopy(lisBestChs)
                    util.print_debug(" --- Found a better scenario in LIS: LIS->" + str(lisBestChs.y) + ", original->" + str(self.g_best.y))
                else:
                    util.print_debug(" --- LIS does not find any better scenarios")
                util.print_debug(" \n\n === End of Local Iterative Search === \n\n")
                ################## End LIS ################

    return self.g_best
def package( args, url, name, archives, archives_from_git, workingdir, download_from_git, branch, redownload_from_git, redownload_archive, force_module, force_fullclone, mock_dir, short_circuit, do_file_restart, ): """Entry point for building a package with autospec.""" conf = config.Config(args.target) conf.parse_config_files_early() if util.debugging: print_debug(f"url 1: {url}") new_archives_from_git = [] name_re_escaped = re.escape(name) # Download the source from git if necessary if download_from_git: giturl = url found_file = False fileslist = None download_file_full_path = "" if util.debugging: print_debug(f"url 2: {url}") print_debug(f"BRANCH 2: {branch}") # filename_re = re.compile(r"^{}{}".format(name, r"(-|-.)(\d+)(\.\d+)+\.tar\.gz")) filename_re = re.compile(r"^{}{}".format(name_re_escaped, r"-.*\.tar\.gz")) if os.path.basename(os.getcwd()) == name: package_path = "./" if util.debugging: print_debug(f"package_path 11: {package_path}") fileslist = os.listdir(package_path) fileslist.sort(key=os.path.getmtime) for filename in fileslist: if re.search(filename_re, filename): found_file = True download_file_full_path = "file://{}".format( os.path.abspath(f"{package_path}{filename}")) if util.debugging: print_debug( f"found old package_path 21: {download_file_full_path}" ) break if not found_file or redownload_from_git is True: download_file_full_path = git.git_archive_all( path=package_path, name=name, url=url, branch=branch, force_module=force_module, force_fullclone=force_fullclone, conf=conf) url = download_file_full_path if util.debugging: print_debug( f"download_file_full_path 11: {download_file_full_path}") print_debug(f"giturl 11: {giturl}") else: package_path = f"packages/{name}" if util.debugging: print_debug(f"package_path 12: {package_path}") fileslist = os.listdir(package_path) fileslist.sort(key=os.path.getmtime) for filename in fileslist: if re.search(filename_re, filename): found_file = True download_file_full_path = "file://{}".format( os.path.abspath(f"{package_path}{filename}")) if util.debugging: print_debug( f"found old package_path 22: {download_file_full_path}" ) break if not found_file or redownload_from_git is True: download_file_full_path = git.git_archive_all( path=package_path, name=name, url=url, branch=branch, force_module=force_module, force_fullclone=force_fullclone, conf=conf) url = download_file_full_path if util.debugging: print_debug( f"download_file_full_path 12: {download_file_full_path}") print_debug(f"giturl 12: {giturl}") else: giturl = "" url = download.do_curl_get_effective_url(url) if archives_from_git: arch_url = [] arch_destination = [] arch_branch = [] arch_submodule = [] arch_forcefullclone = [] if util.debugging: print_debug(f"ARCHIVES_GIT 2: {archives_from_git}") print_debug(f"archives in options.conf: {archives}\n\n") archives_re = re.compile(r"^file:\/\/") index_f = [] for index, url_entry in enumerate(archives): if archives_re.search(url_entry): index_f.append(index) if util.debugging: for x in range(len(index_f) - 1, -1, -1): print_debug( f"rm {index_f[x]}:{archives[index_f[x]]} {index_f[x] + 1}:{archives[index_f[x] + 1]}" ) for x in sorted(range(len(index_f) - 1, -1, -1), reverse=True): del archives[index_f[x]:index_f[x] + 2] if util.debugging: print_debug(f"archives in options.conf: {archives}") for aurl, dest, br, sm, ffc in zip(archives_from_git[::5], archives_from_git[1::5], archives_from_git[2::5], archives_from_git[3::5], archives_from_git[4::5]): arch_url.append(aurl) arch_destination.append(dest) arch_branch.append(br) 
arch_submodule.append(sm)
            arch_forcefullclone.append(ffc)
            if util.debugging:
                print_debug(
                    f"FOR ZIP {arch_url[-1]} - {arch_destination[-1]} - {arch_branch[-1]} - {arch_submodule[-1]} - {arch_forcefullclone[-1]}"
                )
        for index, new_arch_url in enumerate(arch_url, start=0):
            found_file = False
            fileslist = []
            download_file_full_path = ""
            arch_name = os.path.splitext(os.path.basename(new_arch_url))[0]
            arch_name_re_escaped = re.escape(arch_name)
            filename_re = re.compile(r"^{}{}".format(arch_name_re_escaped, r"-.*\.tar\.gz"))
            if util.debugging:
                print_debug(f"arch_name: {arch_name}")
            if os.path.basename(os.getcwd()) == name:
                package_path = "./"
                if util.debugging:
                    print_debug(f"archive package_path 1: {package_path}")
                for filename in os.scandir(package_path):
                    if filename.is_file():
                        if filename_re.search(filename.name):
                            found_file = True
                            download_file_full_path = "file://{}".format(
                                os.path.abspath(
                                    f"{package_path}{filename.name}"))
                            if util.debugging:
                                print_debug(f"filename: {filename.name}")
                                print_debug(f"Index: {index}")
                                print_debug(
                                    f"Destination: {arch_destination[index]} - Branch: {arch_branch[index]}"
                                )
                                print_debug(
                                    f"archive found 1: {arch_name} - {download_file_full_path}"
                                )
                            break
                if not found_file or redownload_archive is True:
                    if util.debugging:
                        print_debug(f"Index: {index}")
                        print_debug(
                            f"Destination: {arch_destination[index]} - Branch: {arch_branch[index]}"
                        )
                        print_debug(
                            f"Download archive 1: {arch_name} - {new_arch_url}"
                        )
                    download_file_full_path = git.git_archive_all(
                        path=package_path,
                        name=arch_name,
                        url=new_arch_url,
                        branch=arch_branch[index],
                        force_module=str_to_bool(arch_submodule[index]),
                        force_fullclone=str_to_bool(
                            arch_forcefullclone[index]),
                        conf=conf)
                if util.debugging:
                    print_debug(
                        f"archive download_file_full_path 1: {download_file_full_path}"
                    )
                if download_file_full_path in archives or arch_destination[
                        index] in archives:
                    print_info(f"\nAlready in archives: {archives}")
                else:
                    archives.append(download_file_full_path)
                    archives.append(arch_destination[index])
                    print_info(f"\nAdding to archives: {archives}")
                new_archives_from_git.append(arch_url[index])
                new_archives_from_git.append(arch_destination[index])
                new_archives_from_git.append(arch_branch[index])
                new_archives_from_git.append(arch_submodule[index])
                new_archives_from_git.append(arch_forcefullclone[index])
            else:
                package_path = f"packages/{name}"
                if util.debugging:
                    print_debug(f"archive package_path 2: {package_path}")
                for filename in os.scandir(package_path):
                    if filename.is_file():
                        if filename_re.search(filename.name):
                            found_file = True
                            download_file_full_path = "file://{}".format(
                                os.path.abspath(
                                    f"{package_path}{filename.name}"))
                            if util.debugging:
                                print_debug(f"Index: {index}")
                                print_debug(
                                    f"Destination: {arch_destination[index]} - Branch: {arch_branch[index]}"
                                )
                                print_debug(
                                    f"archive found 2: {arch_name} - {download_file_full_path}"
                                )
                            break
                if not found_file or redownload_archive is True:
                    if util.debugging:
                        print_debug(f"Index: {index}")
                        print_debug(
                            f"Destination: {arch_destination[index]} - Branch: {arch_branch[index]}"
                        )
                        print_debug(
                            f"Download archive 2: {arch_name} - {new_arch_url}"
                        )
                    download_file_full_path = git.git_archive_all(
                        path=package_path,
                        name=arch_name,
                        url=new_arch_url,
                        branch=arch_branch[index],
                        force_module=str_to_bool(arch_submodule[index]),
                        force_fullclone=str_to_bool(
                            arch_forcefullclone[index]),
                        conf=conf)
                if util.debugging:
                    print_debug(
                        f"archive download_file_full_path 2: {download_file_full_path}"
                    )
                if download_file_full_path in archives or arch_destination[index] in
archives: print_info(f"\nAlready in archives: {archives}") else: archives.append(download_file_full_path) archives.append(arch_destination[index]) print_info(f"\nAdding to archives: {archives}") new_archives_from_git.append(arch_url[index]) new_archives_from_git.append(arch_destination[index]) new_archives_from_git.append(arch_branch[index]) new_archives_from_git.append(arch_submodule[index]) new_archives_from_git.append(arch_forcefullclone[index]) if util.debugging: print_debug(f"new_archives_from_git: {new_archives_from_git}\n") #check_requirements(args.git) conf.detect_build_from_url(url) package = build.Build() # # First, download the tarball, extract it and then do a set # of static analysis on the content of the tarball. # filemanager = files.FileManager(conf, package, mock_dir, short_circuit) if util.debugging: print_debug(f"url 4: {url}") print_debug(f"archives 4: {archives}") print_debug(f"new_archives_from_git 4: {new_archives_from_git}") content = tarball.Content(url, name, args.version, archives, conf, workingdir, giturl, download_from_git, branch, new_archives_from_git, force_module, force_fullclone) content.process(filemanager) conf.create_versions(content.multi_version) conf.content = content # hack to avoid recursive dependency on init # Search up one level from here to capture multiple versions _dir = content.path conf.setup_patterns() conf.config_file = args.config requirements = buildreq.Requirements(content.url) requirements.set_build_req(conf) conf.parse_config_files(args.bump, filemanager, content.version, requirements) conf.setup_patterns(conf.failed_pattern_dir) conf.parse_existing_spec(content.name) if args.prep_only: write_prep(conf, workingdir, content) exit(0) if args.license_only: try: with open( os.path.join(conf.download_path, content.name + ".license"), "r", ) as dotlic: for word in dotlic.read().split(): if ":" not in word: license.add_license(word) except Exception: pass # Start one directory higher so we scan *all* versions for licenses license.scan_for_licenses(os.path.dirname(_dir), conf, name) exit(0) if short_circuit == "prep" or short_circuit is None: requirements.scan_for_configure(_dir, content.name, conf) specdescription.scan_for_description(content.name, _dir, conf.license_translations, conf.license_blacklist) # Start one directory higher so we scan *all* versions for licenses license.scan_for_licenses(os.path.dirname(_dir), conf, content.name) commitmessage.scan_for_changes(conf.download_path, _dir, conf.transforms) conf.add_sources(archives, content) check.scan_for_tests(_dir, conf, requirements, content) # # Now, we have enough to write out a specfile, and try to build it. 
# We will then analyze the build result and learn information until the # package builds # specfile = specfiles.Specfile(content.url, content.version, content.name, content.release, conf, requirements, content, mock_dir, short_circuit) filemanager.load_specfile(specfile) load_specfile(conf, specfile) if args.integrity: interactive_mode = not args.non_interactive pkg_integrity.check(url, conf, interactive=interactive_mode) pkg_integrity.load_specfile(specfile) if short_circuit == "prep" or short_circuit is None: conf.create_buildreq_cache(content.version, requirements.buildreqs_cache) # conf.create_reqs_cache(content.version, requirements.reqs_cache) specfile.write_spec() filemanager.load_specfile_information(specfile, content) if short_circuit == "prep": util.call( f"sudo rm -rf {mock_dir}/clear-{content.name}/root/builddir/build/SRPMS/" ) util.call( f"sudo rm -rf {mock_dir}/clear-{content.name}/root/builddir/build/BUILD/" ) if short_circuit == "install": util.call( f"sudo rm -rf {mock_dir}/clear-{content.name}/root/builddir/build/RPMS/" ) while 1: package.package( filemanager, args.mock_config, args.mock_opts, conf, requirements, content, mock_dir, short_circuit, do_file_restart, args.cleanup, ) if (short_circuit != package.short_circuit): print_info(f"short_circuit: {short_circuit}") print_info(f"package.short_circuit: {package.short_circuit}") short_circuit = package.short_circuit print_info(f"new short_circuit: {short_circuit}") filemanager.load_specfile_information(specfile, content) filemanager.load_specfile(specfile) specfile.write_spec() filemanager.newfiles_printed = 0 mock_chroot = f"{mock_dir}/clear-{package.uniqueext}/root/builddir/build/BUILDROOT/{content.name}-{content.version}-{content.release}.x86_64" if filemanager.clean_directories(mock_chroot): # directories added to the blacklist, need to re-run package.must_restart += 1 print_info(f"filemanager.clean_directories({mock_chroot})") if do_file_restart: if package.round > 20 or (package.must_restart == 0 and package.file_restart == 0): if (short_circuit == "install"): print_info(f"short_circuit: {short_circuit}") print_info( f"package.short_circuit: {package.short_circuit}") short_circuit = "binary" print_info(f"new short_circuit: {short_circuit}") continue else: break else: if (package.round > 20 or package.must_restart == 0): break save_mock_logs(conf.download_path, package.round) #if short_circuit is None or short_circuit == "install": #check.check_regression(conf.download_path, conf.config_opts["skip_tests"]) #conf.create_buildreq_cache(content.version, requirements.buildreqs_cache) #conf.create_reqs_cache(content.version, requirements.reqs_cache) if package.success == 0: #conf.create_buildreq_cache(content.version, requirements.buildreqs_cache) print_fatal("Build failed, aborting") sys.exit(1) elif (package.success == 1): if os.path.isfile("README.clear"): try: print("\nREADME.clear CONTENTS") print("*********************") with open("README.clear", "r") as readme_f: print(readme_f.read()) print("*********************\n") except Exception: pass if (short_circuit is None): examine_abi(conf.download_path, content.name) #if os.path.exists("/var/lib/rpm"): #print("\nGenerating whatrequires\n") #pkg_scan.get_whatrequires(content.name, conf.yum_conf) write_out(conf.download_path + "/release", content.release + "\n") # record logcheck output #logcheck(conf.download_path) #if args.git: #print("\nTrying to guess the commit message\n") #commitmessage.guess_commit_message(pkg_integrity.IMPORTED, conf, content) 
#git.commit_to_git(conf, content.name, package.success) elif (short_circuit == "prep"): write_out(conf.download_path + "/release", content.release + "\n") #elif (short_circuit == "build"): # record logcheck output #logcheck(conf.download_path) #elif (short_circuit == "install"): ## record logcheck output #logcheck(conf.download_path) elif (short_circuit == "binary"): examine_abi(conf.download_path, content.name) #if os.path.exists("/var/lib/rpm"): #print("\nGenerating whatrequires\n") #pkg_scan.get_whatrequires(content.name, conf.yum_conf) #write_out(conf.download_path + "/release", content.release + "\n") #if args.git: #print("\nTrying to guess the commit message\n") #commitmessage.guess_commit_message(pkg_integrity.IMPORTED, conf, content) #git.commit_to_git(conf, content.name, package.success) #else: #print("To commit your changes, git add the relevant files and run 'git commit -F commitmsg'") link_new_rpms_here()
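# Hedged invocation sketch (not from the original sources): the attributes on
# `args` below are the ones package() actually reads above; every value here
# is an illustrative assumption, including the URL, paths, and mock config.
from argparse import Namespace

_args = Namespace(target=".", version="", config="common/autospec.conf",
                  bump=False, prep_only=False, license_only=False,
                  integrity=False, non_interactive=True, mock_config="clear",
                  mock_opts="", cleanup=False, git=False)
package(_args, url="https://example.org/foo-1.0.tar.gz", name="foo",
        archives=[], archives_from_git=[], workingdir="/tmp/foo",
        download_from_git=False, branch="master", redownload_from_git=False,
        redownload_archive=False, force_module=False, force_fullclone=False,
        mock_dir="/var/lib/mock", short_circuit=None, do_file_restart=True)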
def initialize(argv, **kwargs):
    """
    Parse the commandline arguments, and setup the master config dict

    Parameters:
        argv (list): a list of arguments
        event_list (EventList): The main list of events
        kill_event (threading.Event): An event used to kill all running threads
        version (str): the current version number for processflow
        branch (str): the branch this version was built from
    """
    # Setup the parser
    pargs = parse_args(argv=argv)
    if pargs.version:
        msg = 'Processflow version {}'.format(kwargs['version'])
        print msg
        sys.exit(0)
    if not pargs.config:
        parse_args(print_help=True)
        return False, False, False
    event_list = kwargs['event_list']
    event = kwargs['kill_event']
    print_line(line='Entering setup', event_list=event_list)

    # check if globus config is valid, else remove it
    globus_config = os.path.join(os.path.expanduser('~'), '.globus.cfg')
    if os.path.exists(globus_config):
        try:
            conf = ConfigObj(globus_config)
        except:
            os.remove(globus_config)

    if not os.path.exists(pargs.config):
        print "Invalid config, {} does not exist".format(pargs.config)
        return False, False, False

    # Check that there are no white space errors in the config file
    line_index = check_config_white_space(pargs.config)
    if line_index != 0:
        print '''
ERROR: line {num} does not have a space after the '=', white space is required.
Please add a space and run again.'''.format(num=line_index)
        return False, False, False

    # read the config file and setup the config dict
    try:
        config = ConfigObj(pargs.config)
    except Exception as e:
        print_debug(e)
        print "Error parsing config file {}".format(pargs.config)
        parse_args(print_help=True)
        return False, False, False

    # run validator for config file
    messages = verify_config(config)
    if messages:
        for message in messages:
            print_message(message)
        return False, False, False

    try:
        setup_directories(pargs, config)
    except Exception as e:
        print_message('Failed to setup directories')
        print_debug(e)
        sys.exit(1)

    if pargs.resource_path:
        config['global']['resource_path'] = os.path.abspath(pargs.resource_path)
    else:
        config['global']['resource_path'] = os.path.join(
            sys.prefix, 'share', 'processflow', 'resources')

    # Setup boolean config flags
    config['global']['host'] = True if config.get('img_hosting') else False
    config['global']['always_copy'] = True if pargs.always_copy else False
    config['global']['dryrun'] = True if pargs.dryrun else False
    config['global']['debug'] = True if pargs.debug else False
    config['global']['verify'] = True if pargs.verify else False
    config['global']['max_jobs'] = pargs.max_jobs if pargs.max_jobs else False

    # setup logging
    if pargs.log:
        log_path = pargs.log
    else:
        log_path = os.path.join(config['global']['project_path'], 'output', 'processflow.log')
    print_line(line='Log saved to {}'.format(log_path), event_list=event_list)
    if not kwargs.get('testing'):
        from imp import reload
        reload(logging)
    config['global']['log_path'] = log_path
    if os.path.exists(log_path):
        logbak = log_path + '.bak'
        if os.path.exists(logbak):
            os.remove(logbak)
        copyfile(log_path, log_path + '.bak')
    log_level = logging.DEBUG if pargs.debug else logging.INFO
    logging.basicConfig(format='%(asctime)s:%(levelname)s: %(message)s',
                        datefmt='%m/%d/%Y %I:%M:%S %p',
                        filename=log_path,
                        filemode='w',
                        level=log_level)
    logging.getLogger('globus_sdk').setLevel(logging.ERROR)
    logging.getLogger('globus_cli').setLevel(logging.ERROR)

    logging.info("Running with config:")
    msg = json.dumps(config, sort_keys=False, indent=4)
    logging.info(msg)

    if pargs.max_jobs:
        print_line(line="running with maximum {} jobs".format(pargs.max_jobs),
                   event_list=event_list)

    if not config['global']['host'] or not config.get('img_hosting'):
        print_line(line='Not hosting img output', event_list=event_list)

    msg = 'processflow version {} branch {}'.format(kwargs['version'], kwargs['branch'])
    logging.info(msg)

    # Copy the config into the input directory for safekeeping
    input_config_path = os.path.join(config['global']['project_path'], 'input', 'run.cfg')
    try:
        copy(pargs.config, input_config_path)
    except:
        pass

    if config['global']['always_copy']:
        msg = 'Running in forced-copy mode, previously hosted diagnostic output will be replaced'
    else:
        msg = 'Running without forced-copy, previously hosted output will be preserved'
    print_line(line=msg, event_list=event_list)

    # initialize the filemanager
    db = os.path.join(config['global'].get('project_path'), 'output', 'processflow.db')
    msg = 'Initializing file manager'
    print_line(msg, event_list)
    filemanager = FileManager(database=db, event_list=event_list, config=config)

    filemanager.populate_file_list()
    msg = 'Starting local status update'
    print_line(msg, event_list)

    filemanager.update_local_status()
    msg = 'Local status update complete'
    print_line(msg, event_list)

    msg = filemanager.report_files_local()
    print_line(msg, event_list)

    filemanager.write_database()
    all_data = filemanager.all_data_local()
    if all_data:
        msg = 'all data is local'
    else:
        msg = 'Additional data needed'
    print_line(msg, event_list)

    logging.info("FileManager setup complete")
    logging.info(str(filemanager))

    if all_data:
        print_line(line="skipping globus setup", event_list=event_list)
    else:
        if config['global'].get('local_globus_uuid'):
            endpoints = [endpoint for endpoint in filemanager.get_endpoints()]
            local_endpoint = config['global'].get('local_globus_uuid')
            if local_endpoint:
                endpoints.append(local_endpoint)
            msg = 'Checking authentication for {} endpoints'.format(endpoints)
            print_line(line=msg, event_list=event_list)
            setup_success = setup_globus(endpoints=endpoints, event_list=event_list)

            if not setup_success:
                print "Globus setup error"
                return False, False, False
            else:
                print_line(line='Globus authentication complete', event_list=event_list)

    # setup the runmanager
    runmanager = RunManager(event_list=event_list,
                            event=event,
                            config=config,
                            filemanager=filemanager)
    runmanager.setup_cases()
    runmanager.setup_jobs()
    runmanager.write_job_sets(
        os.path.join(config['global']['project_path'], 'output', 'state.txt'))
    return config, filemanager, runmanager
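# Hedged usage sketch (assumed wiring; the EventList import path, version,
# and branch strings are placeholders, not from the original sources):
# initialize() returns the config dict plus the file and run managers, or
# (False, False, False) on any setup error.
import sys
import threading
from lib.events import EventList  # assumed import path

config, filemanager, runmanager = initialize(
    argv=sys.argv[1:],
    event_list=EventList(),
    kill_event=threading.Event(),
    version='0.0.0',
    branch='master')
if not config:
    sys.exit(1)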