def screenSaverOn(self):
    if self.ssOff_trigger:
        debug.warning("No Motion triggered...screen saver being turned on")
        self.ssOff_trigger = False
        self.screensaver.runSaver()
    else:
        debug.warning("Ignoring no motion, screen saver not active")

def route(self, detour_scale):
    """
    This does the A* maze routing with preferred direction routing.
    """
    # We set a cost bound of the HPWL for run-time. This can be
    # overridden if the route fails due to pruning a feasible solution.
    cost_bound = detour_scale * self.cost_to_target(self.source[0]) * self.PREFERRED_COST

    # Make sure the queue is empty if we run another route
    while len(self.q) > 0:
        heappop(self.q)

    # Put the source items into the queue
    self.init_queue()
    cheapest_path = None
    cheapest_cost = None

    # Keep expanding and adding to the priority queue until we are done
    while len(self.q) > 0:
        # should we keep the path in the queue as well or just the final node?
        (cost, count, path) = heappop(self.q)
        debug.info(2, "Queue size: size=" + str(len(self.q)) + " " + str(cost))
        debug.info(3, "Expanding: cost=" + str(cost) + " " + str(path))

        # expand the last element
        neighbors = self.expand_dirs(path)
        debug.info(3, "Neighbors: " + str(neighbors))

        for n in neighbors:
            # node is added to the map by the expand routine
            newpath = path + [n]
            # check if we hit the target and are done
            if self.is_target(n):
                return (newpath, self.cost(newpath))
            elif not self.map[n].visited:
                # current path cost + predicted cost
                current_cost = self.cost(newpath)
                target_cost = self.cost_to_target(n)
                predicted_cost = current_cost + target_cost

                # only add the cost if it is less than our bound
                if predicted_cost < cost_bound:
                    if (self.map[n].min_cost == -1 or
                            current_cost < self.map[n].min_cost):
                        self.map[n].visited = True
                        self.map[n].min_path = newpath
                        self.map[n].min_cost = predicted_cost
                        debug.info(3, "Enqueuing: cost=" + str(current_cost) + "+" +
                                   str(target_cost) + " " + str(newpath))
                        # add the cost to get to this point if we haven't reached it yet
                        heappush(self.q, (predicted_cost, self.counter, newpath))
                        self.counter += 1

    debug.warning("Unable to route path. Expand the detour_scale to allow detours.")
    return (None, None)

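# Usage sketch (hypothetical caller name): route() is normally retried with an
# escalating detour_scale, as route_signal() further below does, because the scale
# multiplies the HPWL-based cost_bound and so bounds how far a path may detour
# before being pruned.
def route_with_detours(router):
    for detour_scale in [5 * pow(2, x) for x in range(5)]:
        (path, cost) = router.route(detour_scale)
        if path is not None:
            return (path, cost)
    return (None, None)
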
def _run_ipbench(self, args, logfile):
    cmd = [siteconfig.get('IPBENCH_PATH')] + args
    firstrun = True
    for _ in range(IPBENCH_ITERATIONS):
        if firstrun:
            firstrun = False
        else:
            # sleep a moment to let things settle down between runs
            debug.verbose('sleeping between ipbench runs')
            time.sleep(IPBENCH_SLEEPTIME)

        debug.verbose('running ipbench: %s' % ' '.join(cmd))
        child = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        timeout = datetime.datetime.now() + IPBENCH_TIMEOUT
        while True:
            # wait for some output
            (rlist, _, _) = select_timeout(timeout, [child.stdout])
            if not rlist:
                debug.warning('ipbench run timed out')
                child.terminate()
                child.wait()
                raise TimeoutError('waiting for ipbench')
            # read one char at a time to avoid blocking
            c = child.stdout.read(1)
            if c == '':
                break  # EOF
            logfile.write(c)
        child.wait()
        assert child.returncode == 0  # check for successful exit

def push_stmt(self, stmt):
    self.current = RecursionNode(stmt, self.current)
    if self._check_recursion():
        debug.warning('caught recursion', stmt)
        self.pop_stmt()
        return True
    return False

def run_lvs(cell_name, gds_name, sp_name, final_verification=False):
    global lvs_warned
    if not lvs_warned:
        debug.warning("LVS unable to run.")
        lvs_warned = True
    # Since we warned, return a failing test.
    return 1

def check_preferred_teams(self):
    if not isinstance(self.preferred_teams, str) and not isinstance(self.preferred_teams, list):
        debug.warning("preferred_teams should be an array of team names or a single team name string. Using default preferred_teams, {}".format(DEFAULT_PREFERRED_TEAMS))
        self.preferred_teams = DEFAULT_PREFERRED_TEAMS

    if isinstance(self.preferred_teams, str):
        team = self.preferred_teams
        self.preferred_teams = [team]

def iter_content(self):
    """
    The index is here just ignored, because of all the appends, etc.
    Lists/sets are too complicated to handle that.
    """
    items = []
    for array in evaluate.follow_call_list(self.var_args):
        if isinstance(array, evaluate.Instance) and len(array.var_args):
            temp = array.var_args[0][0]
            if isinstance(temp, ArrayInstance):
                # prevent recursions
                # TODO compare Modules
                if self.var_args.start_pos != temp.var_args.start_pos:
                    items += temp.iter_content()
                else:
                    debug.warning('ArrayInstance recursion', self.var_args)
                continue
        items += evaluate.get_iterator_types([array])

    if self.var_args.parent_stmt is None:
        return []  # generated var_args should not be checked for arrays

    module = self.var_args.parent_stmt.get_parent_until()
    is_list = str(self.instance.name) == 'list'
    items += _check_array_additions(self.instance, module, is_list)
    return items

def __get_config(self, base_filename, error=None):
    # Look up and return the named config file
    filename = "{}.json".format(base_filename)
    (reference_config, error) = self.read_json(filename)
    if not reference_config:
        if error:
            debug.error(error)
        else:
            debug.error("Invalid {} config file. Make sure {} exists in config/".format(base_filename, base_filename))
        sys.exit(1)

    if base_filename == "config":
        # Validate against the config.json schema
        debug.error("INFO: Validating config.json.....")
        conffile = "config/config.json"
        schemafile = "config/config.schema.json"
        confpath = get_file(conffile)
        schemapath = get_file(schemafile)
        (valid, msg) = validateConf(confpath, schemapath)
        if valid:
            debug.error("INFO: config.json passes validation")
        else:
            debug.warning("WARN: config.json fails validation: error: [{0}]".format(msg))
            debug.warning("WARN: Rerun the nhl_setup app to create a valid config.json")
            sys.exit(1)

    return reference_config

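# A minimal sketch of what the validateConf() helper used above might look like.
# This is hypothetical (the real helper is defined elsewhere in the project) and
# assumes the jsonschema package is available.
import json

import jsonschema


def validateConf(confpath, schemapath):
    # Load both the user config and its schema, then validate one against the other.
    with open(confpath) as cf, open(schemapath) as sf:
        conf, schema = json.load(cf), json.load(sf)
    try:
        jsonschema.validate(conf, schema)
        return (True, None)
    except jsonschema.ValidationError as e:
        return (False, e.message)
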
def convert_pin(self):
    """
    Convert the list of pin shapes into sets of routing grids.
    The secondary set of grids are "optional" pin shapes that
    should be either blocked or part of the pin.
    """
    pin_set = set()
    partial_set = set()
    blockage_set = set()

    for pin_list in self.pins:
        for pin in pin_list:
            debug.info(2, " Converting {0}".format(pin))
            # Determine which tracks the pin overlaps
            (sufficient, insufficient) = self.router.convert_pin_to_tracks(self.name, pin)
            pin_set.update(sufficient)
            partial_set.update(insufficient)

            # Blockages will be a super-set of pins since it uses the inflated pin shape.
            blockage_in_tracks = self.router.convert_blockage(pin)
            blockage_set.update(blockage_in_tracks)

    # If we have a blockage, we must remove the grids.
    # Remember, this excludes the pin blockages already.
    shared_set = pin_set & self.router.blocked_grids
    if len(shared_set) > 0:
        debug.info(2, "Removing pins {}".format(shared_set))
        pin_set.difference_update(shared_set)
    shared_set = partial_set & self.router.blocked_grids
    if len(shared_set) > 0:
        debug.info(2, "Removing pins {}".format(shared_set))
        partial_set.difference_update(shared_set)
    shared_set = blockage_set & self.router.blocked_grids
    if len(shared_set) > 0:
        debug.info(2, "Removing blocks {}".format(shared_set))
        blockage_set.difference_update(shared_set)

    # At least one of the groups must have some valid tracks
    if len(pin_set) == 0 and len(partial_set) == 0 and len(blockage_set) == 0:
        # debug.warning("Pin is very close to metal blockage.\nAttempting to expand blocked pin {}".format(self.pins))
        for pin_list in self.pins:
            for pin in pin_list:
                debug.warning(" Expanding conversion {0}".format(pin))
                # Determine which tracks the pin overlaps
                (sufficient, insufficient) = self.router.convert_pin_to_tracks(self.name, pin, expansion=1)
                pin_set.update(sufficient)
                partial_set.update(insufficient)

        if len(pin_set) == 0 and len(partial_set) == 0:
            debug.error("Unable to find unblocked pin {} {}".format(self.name, self.pins))
            self.router.write_debug_gds("blocked_pin.gds")

    # Consider all the grids that would be blocked
    self.grids = pin_set | partial_set
    # Remember the secondary grids for removing adjacent pins
    self.secondary_grids = partial_set

    debug.info(2, " pins {}".format(self.grids))
    debug.info(2, " secondary {}".format(self.secondary_grids))

def updateUserData():
    if username == "bluepixels":
        debug.warning("Not updating userdata for bluepixels")
        return 0
    dbcon = dbTrayServer.dbTray()
    try:
        hostname = socket.gethostname()
    except:
        debug.error(sys.exc_info())
        return 0
    if hostname.find("localhost") >= 0:
        return 0
    try:
        dbcon.execute("insert into users (user,host) values (\"" + username + "\",\"" + hostname + "\") ")
    except:
        try:
            dbcon.execute("delete from users where host=\"" + hostname + "\"")
        except:
            debug.error(sys.exc_info())
        try:
            dbcon.execute("delete from users where user=\"" + username + "\"")
        except:
            debug.error(sys.exc_info())
        try:
            dbcon.execute("insert into users (user,host) values (\"" + username + "\",\"" + hostname + "\") ")
        except:
            debug.error(sys.exc_info())

def analytical_delay(self, slews, loads):
    """ Return the analytical model results for the SRAM. """
    if OPTS.num_rw_ports > 1 or (OPTS.num_w_ports > 0 and OPTS.num_r_ports > 0):
        debug.warning("Analytical characterization results are not supported for multiport.")

    power = self.analytical_power(slews, loads)
    port_data = self.get_empty_measure_data_dict()
    for slew in slews:
        for load in loads:
            self.set_load_slew(load, slew)
            bank_delay = self.sram.analytical_delay(self.vdd_voltage, self.slew, self.load)
            for port in self.all_ports:
                for mname in self.delay_meas_names + self.power_meas_names:
                    if "power" in mname:
                        port_data[port][mname].append(power.dynamic)
                    elif "delay" in mname:
                        port_data[port][mname].append(bank_delay[port].delay / 1e3)
                    elif "slew" in mname:
                        port_data[port][mname].append(bank_delay[port].slew / 1e3)
                    else:
                        debug.error("Measurement name not recognized: {}".format(mname), 1)

    sram_data = {"min_period": 0, "leakage_power": power.leakage}
    return (sram_data, port_data)

def report_status():
    """ Check for valid arguments and report the info about the SRAM being generated """
    global OPTS

    # Check if all arguments are integers for bits, size, banks
    if type(OPTS.word_size) != int:
        debug.error("{0} is not an integer in config file.".format(OPTS.word_size))
    if type(OPTS.num_words) != int:
        debug.error("{0} is not an integer in config file.".format(OPTS.num_words))

    if not OPTS.tech_name:
        debug.error("Tech name must be specified in config file.")

    debug.print_raw("Technology: {0}".format(OPTS.tech_name))
    total_size = OPTS.word_size * OPTS.num_words * OPTS.num_banks
    debug.print_raw("Total size: {} bits".format(total_size))
    if total_size >= 2**14:
        debug.warning("Requesting such a large memory size ({0}) will have a large run-time. ".format(total_size) +
                      "Consider using multiple smaller banks.")
    debug.print_raw("Word size: {0}\nWords: {1}\nBanks: {2}".format(OPTS.word_size, OPTS.num_words, OPTS.num_banks))
    debug.print_raw("RW ports: {0}\nR-only ports: {1}\nW-only ports: {2}".format(OPTS.num_rw_ports, OPTS.num_r_ports, OPTS.num_w_ports))

    if OPTS.netlist_only:
        debug.print_raw("Netlist only mode (no physical design is being done).")

    if not OPTS.inline_lvsdrc:
        debug.print_raw("DRC/LVS/PEX is only run on the top-level design.")

    if not OPTS.check_lvsdrc:
        debug.print_raw("DRC/LVS/PEX is completely disabled.")

def led_matrix_options(args):
    options = RGBMatrixOptions()

    if args.led_gpio_mapping is not None:
        options.hardware_mapping = args.led_gpio_mapping

    options.rows = args.led_rows
    options.cols = args.led_cols
    options.chain_length = args.led_chain
    options.parallel = args.led_parallel
    options.row_address_type = args.led_row_addr_type
    options.multiplexing = args.led_multiplexing
    options.pwm_bits = args.led_pwm_bits
    options.brightness = args.led_brightness
    options.pwm_lsb_nanoseconds = args.led_pwm_lsb_nanoseconds
    options.led_rgb_sequence = args.led_rgb_sequence

    try:
        options.pixel_mapper_config = args.led_pixel_mapper
    except AttributeError:
        debug.warning("Your compiled RGB Matrix Library is out of date.")
        debug.warning("The --led-pixel-mapper argument will not work until it is updated.")

    if args.led_show_refresh:
        options.show_refresh_rate = 1

    if args.led_slowdown_gpio is not None:
        options.gpio_slowdown = args.led_slowdown_gpio

    if args.led_no_hardware_pulse:
        options.disable_hardware_pulsing = True

    return options

def update(self, force=False) -> UpdateStatus:
    status = UpdateStatus.SUCCESS
    if force or self.__should_update():
        debug.log("Headlines should update!")
        self.starttime = time.time()
        feeds = []
        debug.log("%d feeds to update...", len(self.feed_urls))
        feedparser.USER_AGENT = "mlb-led-scoreboard/3.0 +https://github.com/MLB-LED-Scoreboard/mlb-led-scoreboard"
        if len(self.feed_urls) > 0:
            debug.log("Feed URLs found...")
            for idx, url in enumerate(self.feed_urls):
                if idx < HEADLINE_MAX_FEEDS:  # Only parse MAX feeds to prevent potential hangs
                    debug.log("Fetching %s", url)
                    f = feedparser.parse(url)
                    try:
                        title = f.feed.title.encode("ascii", "ignore")
                        debug.log("Fetched feed '%s' with %d entries.", title, len(f.entries))
                        feeds.append(f)
                    except AttributeError:
                        debug.warning("There was a problem fetching {}".format(url))
                        status = UpdateStatus.FAIL
            self.feed_data = feeds
    else:
        status = UpdateStatus.DEFERRED

    return status

def get_pref_teams_id(self):
    """
    Find the preferred teams' IDs. The type of team information varies
    throughout the API, except for the team's ID, which is much easier
    to work with.

    :return: list of the preferred teams' IDs, in order
    """
    try:
        allteams = self.teams
        pref_teams = self.config.preferred_teams
        allteams_id = {}
        pref_teams_id = []
        # Put all the teams in a dict with their name as key and ID as value.
        for team in allteams:
            allteams_id[team.team_name] = team.team_id

        # Go through the list of preferred team names. If a team's name exists,
        # put its ID in a new list.
        if pref_teams:
            for team in pref_teams:
                if team in allteams_id:
                    pref_teams_id.append(allteams_id[team])
                else:
                    debug.warning(team + " is not an NHL team. Make sure you typed the team's name properly.")

            return pref_teams_id
        else:
            return False
    except TypeError:
        return []

def check_preferred_divisions(self):
    if not isinstance(self.preferred_divisions, str) and not isinstance(self.preferred_divisions, list):
        debug.warning("preferred_divisions should be an array of division names or a single division name string. Using default preferred_divisions, {}".format(DEFAULT_PREFERRED_DIVISIONS))
        self.preferred_divisions = DEFAULT_PREFERRED_DIVISIONS

    if isinstance(self.preferred_divisions, str):
        division = self.preferred_divisions
        self.preferred_divisions = [division]

def screenSaverOff(self):
    if self.data.screensaver_displayed and self.data.screensaver and not self.data.pb_trigger:
        debug.warning("Motion triggered...screen saver being turned off")
        self.ssOff_trigger = True
        self.screensaver.stopSaver()
    else:
        debug.warning("Ignoring motion, screen saver not active")

def update(self, force=False):
    if force or self.__should_update():
        debug.log("Weather should update!")
        self.starttime = time.time()
        if self.apikey_valid:
            debug.log("API Key hasn't been flagged as bad yet")
            try:
                self.observation = self.client.weather_at_place(self.location)
                weather = self.observation.get_weather()
                self.temp = weather.get_temperature(self.temperature_unit)['temp']
                self.wind_speed = weather.get_wind(self.speed_unit)['speed']
                self.wind_dir = weather.get_wind(self.speed_unit)['deg']
                self.conditions = weather.get_status()
                self.icon_name = weather.get_weather_icon_name()
                debug.log("Weather: {}; Wind: {}; {} ({})".format(
                    self.temperature_string(), self.wind_string(), self.conditions, self.icon_filename()))
            except pyowm.exceptions.api_response_error.UnauthorizedError:
                debug.warning("[WEATHER] The API key provided doesn't appear to be valid. Please check your config.json.")
                debug.warning("[WEATHER] You can get a free API key by visiting https://home.openweathermap.org/users/sign_up")
                self.apikey_valid = False

def filter_gds(cell_name, input_gds, output_gds):
    """ Run the gds through magic for any layer processing """
    global OPTS

    # Copy .magicrc file into temp dir
    magic_file = OPTS.openram_tech + "tech/.magicrc"
    if os.path.exists(magic_file):
        shutil.copy(magic_file, OPTS.openram_temp)
    else:
        debug.warning("Could not locate .magicrc file: {}".format(magic_file))

    run_file = OPTS.openram_temp + "run_filter.sh"
    f = open(run_file, "w")
    f.write("#!/bin/sh\n")
    f.write("{} -dnull -noconsole << EOF\n".format(OPTS.magic_exe[1]))
    f.write("gds polygon subcell true\n")
    f.write("gds warning default\n")
    f.write("gds read {}\n".format(input_gds))
    f.write("load {}\n".format(cell_name))
    f.write("cellname delete \\(UNNAMED\\)\n")
    # f.write("writeall force\n")
    f.write("select top cell\n")
    f.write("gds write {}\n".format(output_gds))
    f.write("quit -noprompt\n")
    f.write("EOF\n")
    f.close()
    os.system("chmod u+x {}".format(run_file))

    (outfile, errfile, resultsfile) = run_script(cell_name, "filter")

def run_pex(name, gds_name, sp_name, output=None, final_verification=False):
    global pex_warned
    if not pex_warned:
        debug.warning("PEX unable to run.")
        pex_warned = True
    # Since we warned, return a failing test.
    return 1

def analytical_delay(self, slews, loads):
    """ Return the analytical model results for the SRAM. """
    if OPTS.num_rw_ports > 1 or (OPTS.num_w_ports > 0 and OPTS.num_r_ports > 0):
        debug.warning("Analytical characterization results are not supported for multiport.")

    self.create_signal_names()
    self.create_measurement_names()
    power = self.analytical_power(slews, loads)
    port_data = self.get_empty_measure_data_dict()
    relative_loads = [logical_effort.convert_farad_to_relative_c(c_farad) for c_farad in loads]
    for slew in slews:
        for load in relative_loads:
            self.set_load_slew(load, slew)
            bank_delay = self.sram.analytical_delay(self.corner, self.slew, self.load)
            for port in self.all_ports:
                for mname in self.delay_meas_names + self.power_meas_names:
                    if "power" in mname:
                        port_data[port][mname].append(power.dynamic)
                    elif "delay" in mname:
                        port_data[port][mname].append(bank_delay[port].delay / 1e3)
                    elif "slew" in mname:
                        port_data[port][mname].append(bank_delay[port].slew / 1e3)
                    else:
                        debug.error("Measurement name not recognized: {}".format(mname), 1)

    period_margin = 0.1
    risefall_delay = bank_delay[self.read_ports[0]].delay / 1e3
    sram_data = {"min_period": risefall_delay * 2 * period_margin,
                 "leakage_power": power.leakage}

    debug.info(2, "SRAM Data:\n{}".format(sram_data))
    debug.info(2, "Port Data:\n{}".format(port_data))

    return (sram_data, port_data)

def run_drc(cell_name, gds_name, extract=False, final_verification=False):
    global drc_warned
    if not drc_warned:
        debug.warning("DRC unable to run.")
        drc_warned = True
    # Since we warned, return a failing test.
    return 1

def route_signal(self, pin_name, src_idx, dest_idx):
    # First pass: try to route normally.
    # Second pass: clear prior pin blockages so that you can route over other metal
    # of the same supply. Otherwise, this can create a lot of circular routes due to
    # accidental overlaps.
    for unblock_routes in [False, True]:
        for detour_scale in [5 * pow(2, x) for x in range(5)]:
            debug.info(2, "Routing {0} to {1} with scale {2}".format(src_idx, dest_idx, detour_scale))

            # Clear everything in the routing grid.
            self.rg.reinit()

            # This is inefficient since it is non-incremental, but it was
            # easier to debug.
            self.prepare_blockages()
            if unblock_routes:
                msg = "Unblocking supply self blockages to improve access (may cause DRC errors):\n{0}\n{1}"
                debug.warning(msg.format(pin_name, self.pin_groups[pin_name][src_idx].pins))
                self.set_blockages(self.path_blockages, False)

            # Add the single component of the pin as the source
            # which unmarks it as a blockage too
            self.add_pin_component_source(pin_name, src_idx)

            # Marks all pin components except index as target
            self.add_pin_component_target(pin_name, dest_idx)

            # Actually run the A* router
            if self.run_router(detour_scale=detour_scale):
                return

    self.write_debug_gds("debug_route.gds", True)

def report_status():
    """ Check for valid arguments and report the info about the SRAM being generated """
    global OPTS

    # Check if all arguments are integers for bits, size, banks
    if type(OPTS.word_size) != int:
        debug.error("{0} is not an integer in config file.".format(OPTS.word_size))
    if type(OPTS.num_words) != int:
        debug.error("{0} is not an integer in config file.".format(OPTS.num_words))

    if not OPTS.tech_name:
        debug.error("Tech name must be specified in config file.")

    debug.print_raw("Technology: {0}".format(OPTS.tech_name))
    total_size = OPTS.word_size * OPTS.num_words * OPTS.num_banks
    debug.print_raw("Total size: {} bits".format(total_size))
    if total_size >= 2**14:
        debug.warning("Requesting such a large memory size ({0}) will have a large run-time. ".format(total_size) +
                      "Consider using multiple smaller banks.")
    debug.print_raw("Word size: {0}\nWords: {1}\nBanks: {2}".format(OPTS.word_size, OPTS.num_words, OPTS.num_banks))
    debug.print_raw("RW ports: {0}\nR-only ports: {1}\nW-only ports: {2}".format(OPTS.num_rw_ports, OPTS.num_r_ports, OPTS.num_w_ports))

    if OPTS.netlist_only:
        debug.print_raw("Netlist only mode (no physical design is being done, netlist_only=False to disable).")

    if not OPTS.route_supplies:
        debug.print_raw("Design supply routing skipped for run-time (incomplete GDS will not be saved) (route_supplies=True to enable).")

    if not OPTS.inline_lvsdrc:
        debug.print_raw("DRC/LVS/PEX is only run on the top-level design to save run-time (inline_lvsdrc=True to enable).")

    if not OPTS.check_lvsdrc:
        debug.print_raw("DRC/LVS/PEX is disabled (check_lvsdrc=True to enable).")

    if OPTS.analytical_delay:
        debug.print_raw("Characterization is disabled (using analytical delay models) (analytical_delay=False to enable).")
    else:
        if OPTS.spice_name != "":
            debug.print_raw("Performing simulation-based characterization with {}".format(OPTS.spice_name))
        if OPTS.trim_netlist:
            debug.print_raw("Trimming netlist to speed up characterization (trim_netlist=False to disable).")

def __init__(self, options, operations,
             bootarch=None,
             machine_name=None,
             boot_timeout=360,
             platform=None,
             buildarchs=None,
             ncores=1,
             cores_per_socket=None,
             kernel_args=[],
             serial_binary="serial_kernel",
             pci_args=[],
             eth0=(0xff, 0xff, 0xff),
             perfcount_type=None,
             boot_driver=None,
             tickrate=0,
             **kwargs):
    self._name = "(unknown)"
    self.options = options
    self._operations = operations
    self._bootarch = bootarch
    self._machine_name = machine_name

    if buildarchs is None:
        buildarchs = [bootarch]
    self._build_archs = buildarchs
    assert bootarch in buildarchs

    self._ncores = ncores

    if cores_per_socket is None:
        cores_per_socket = ncores
    self._cores_per_socket = cores_per_socket

    self._kernel_args = kernel_args
    self._boot_driver = boot_driver
    self._serial_binary = serial_binary
    self._boot_timeout = boot_timeout
    self._platform = platform
    self._pci_args = pci_args
    self._eth0 = eth0
    self._perfcount_type = perfcount_type
    self._tick_rate = tickrate

    if kwargs:
        debug.warning("Machine base class does not understand the "
                      "following machine arguments: %s" % str(kwargs))

def input_load(self):
    """Inform users of an undefined relative input capacitance function used for analytical delays."""
    debug.warning("Design Class {0} input capacitance function needs to be defined".format(self.__class__.__name__))
    debug.warning("Class {0} name {1}".format(self.__class__.__name__, self.name))
    return 0

def get_stage_effort(self, cout, inp_is_rise=True):
    """Inform users of an undefined logical effort function while building new modules."""
    debug.warning("Design Class {0} logical effort function needs to be defined".format(self.__class__.__name__))
    debug.warning("Class {0} name {1}".format(self.__class__.__name__, self.name))
    return None

def _cleanup_ipbenchd(self, user, host):
    # run a remote killall to get rid of ipbenchd
    ssh_dest = '%s@%s' % (user, host)
    remotecmd = 'killall -q python'
    cmd = ['ssh'] + siteconfig.get('SSH_ARGS').split() + [ssh_dest, remotecmd]
    debug.verbose('killing ipbenchd on %s' % host)
    retcode = subprocess.call(cmd)
    if retcode != 0:
        debug.warning('failed to killall python on %s!' % host)

def analytical_delay(self, slew, load=0.0):
    """Inform users of an undefined delay function while building new modules."""
    debug.warning("Design Class {0} delay function needs to be defined".format(self.__class__.__name__))
    debug.warning("Class {0} name {1}".format(self.__class__.__name__, self.name))
    # return 0 to keep code running while building
    return delay_data(0.0, 0.0)

def run_drc(cell_name, gds_name, sp_name=None, extract=True, final_verification=False):
    """Run DRC check on a cell which is implemented in gds_name."""
    global num_drc_runs
    num_drc_runs += 1

    write_drc_script(cell_name, gds_name, extract, final_verification, OPTS.openram_temp, sp_name=sp_name)

    (outfile, errfile, resultsfile) = run_script(cell_name, "ext")
    (outfile, errfile, resultsfile) = run_script(cell_name, "drc")

    # Check the result for these lines in the summary:
    # Total DRC errors found: 0
    # The count is shown in this format:
    # Cell replica_cell_6t has 3 error tiles.
    # Cell tri_gate_array has 8 error tiles.
    # etc.
    try:
        f = open(outfile, "r")
    except FileNotFoundError:
        debug.error("Unable to load DRC results file from {}. Is magic set up?".format(outfile), 1)

    results = f.readlines()
    f.close()
    errors = 1
    # those lines should be the last 3
    for line in results:
        if "Total DRC errors found:" in line:
            errors = int(re.split(": ", line)[1])
            break
    else:
        debug.error("Unable to find the total error line in Magic output.", 1)

    # always display this summary
    result_str = "DRC Errors {0}\t{1}".format(cell_name, errors)
    if errors > 0:
        for line in results:
            if "error tiles" in line:
                debug.info(1, line.rstrip("\n"))
        debug.warning(result_str)
    else:
        debug.info(1, result_str)

    return errors

def runTest(self):
    globals.init_openram("config_{0}".format(OPTS.tech_name))
    from gds_cell import gds_cell
    from design import design
    from signal_router import signal_router as router

    class routing(design, openram_test):
        """ A generic GDS design that we can route on. """
        def __init__(self, name):
            design.__init__(self, "top")
            # Instantiate a GDS cell with the design
            gds_file = "{0}/{1}.gds".format(os.path.dirname(os.path.realpath(__file__)), name)
            cell = gds_cell(name, gds_file)
            self.add_inst(name=name, mod=cell, offset=[0, 0])
            self.connect_inst([])

            layer_stack = ("metal1", "via1", "metal2")
            r = router(layer_stack, self, gds_file)
            connections = [('out_0_2', 'a_0_0'), ('out_0_3', 'b_0_0'),
                           ('out_0_0', 'a_0_1'), ('out_1_2', 'a_1_0'),
                           ('out_1_3', 'b_1_0'), ('out_1_0', 'a_1_1'),
                           ('out_2_1', 'a_2_0'), ('out_2_2', 'b_2_0'),
                           ('out_3_1', 'a_3_0'), ('out_3_2', 'b_3_0'),
                           ('out_4_6', 'a_4_0'), ('out_4_7', 'b_4_0'),
                           ('out_4_8', 'a_4_2'), ('out_4_9', 'b_4_2'),
                           ('out_4_10', 'a_4_4'), ('out_4_11', 'b_4_4'),
                           ('out_4_0', 'a_4_1'), ('out_4_2', 'b_4_1'),
                           ('out_4_4', 'a_4_5'), ('out_4_1', 'a_4_3'),
                           ('out_4_5', 'b_4_3')]
            for (src, tgt) in connections:
                self.assertTrue(r.route(src=src, dest=tgt))

    # This test only runs on scn3me_subm tech
    if OPTS.tech_name == "scn3me_subm":
        r = routing("07_big_test_{0}".format(OPTS.tech_name))
        # fails if there are any DRC errors on any cells
        self.local_drc_check(r)
    else:
        debug.warning("This test does not support technology {0}".format(OPTS.tech_name))

    globals.end_openram()

def check_differential_swing(self, bl_volt, br_volt):
    """This check looks at the difference between the bitline voltages. This
    needs to be large enough to prevent sensing errors."""
    bitline_swing = abs(bl_volt - br_volt)
    debug.info(1, "Bitline swing={:.3f}v".format(bitline_swing))
    vdd_error_margin = 0.2  # 20% of vdd margin for the bitline, a little high for now.
    if bitline_swing < vdd_error_margin * self.vdd_voltage:
        debug.warning("Bitline swing less than {}% Vdd margin. Sensing errors more likely to occur.".format(vdd_error_margin * 100))

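# Worked example (hypothetical values): with vdd_voltage = 1.8 V the threshold is
# 0.2 * 1.8 = 0.36 V, so bl_volt = 1.80 V and br_volt = 1.55 V give a swing of
# only 0.25 V and would trigger the warning above.
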
def getRandomShipType(ship_list):
    # debug.debug("getRandomShipType(%s)" % (ship_list))
    ffvalidate = ""
    while ffvalidate == "":
        index = vsrandom.randrange(0, len(ship_list))
        fishedship = ship_list[index]
        ffvalidate = str(VS.LookupUnitStat(fishedship, "default", "Default_Speed_Governor"))
        if ffvalidate == "":
            debug.warning("Ship type " + str(fishedship) + " does not have a flight model!")
        # debug.debug("shipdata: " + str(fishedship) + " has speed governor = " + str(ffvalidate))
    return fishedship

def load(self, file):
    debug.debug("Loading module {0}", (file,), "plugins")
    try:
        module = __import__(file)
        for name, type in module.__dict__.items():
            if type.__class__.__name__ == "type":
                self.loadType(type, file)
    except Exception as e:
        debug.warning("Error loading module {0}. Error: {1}.", (file, e), "plugins")

def loadType(self, type, module):
    if type.__name__.startswith("_"):
        return
    if not type.__module__ == module:
        return
    try:
        debug.debug("Attempting to load plugin type {0}", (type.__name__,), "plugins")
        item = type()
        item.load(self)
    except Exception as e:
        debug.warning("Couldn't load type {0}. Error: {1}.", (type.__name__, e), "plugins")

def __rackpower(self, arg):
    # retry the rackpower command up to 3 times, stopping on the first success
    retries = 3
    while retries > 0:
        try:
            debug.checkcmd([RACKPOWER, arg, self.get_machine_name()])
            break
        except subprocess.CalledProcessError:
            debug.warning("rackpower %s %s failed" % (arg, self.get_machine_name()))
            retries -= 1
            if retries > 0:
                debug.verbose("retrying...")

def execute_code(code):
    c = "import os; from os.path import *; result=%s"
    variables = {'__file__': module.path}
    try:
        exec_function(c % code, variables)
    except Exception:
        debug.warning('sys path detected, but failed to evaluate')
        return None
    try:
        res = variables['result']
        if isinstance(res, str):
            return os.path.abspath(res)
        else:
            return None
    except KeyError:
        return None

def follow(self, is_goto=False):
    """
    Returns the imported modules.
    """
    if evaluate.follow_statement.push_stmt(self.import_stmt):
        # check recursion
        return []

    if self.import_path:
        try:
            scope, rest = self._follow_file_system()
        except ModuleNotFound:
            debug.warning('Module not found: ' + str(self.import_stmt))
            evaluate.follow_statement.pop_stmt()
            return []

        scopes = [scope]
        scopes += itertools.chain.from_iterable(
            remove_star_imports(s) for s in scopes)

        # follow the rest of the import (not FS -> classes, functions)
        if len(rest) > 1 or rest and self.is_like_search:
            scopes = []
        elif rest:
            if is_goto:
                scopes = itertools.chain.from_iterable(
                    evaluate.get_scopes_for_name(s, rest[0], is_goto=True)
                    for s in scopes)
            else:
                scopes = itertools.chain.from_iterable(
                    evaluate.follow_path(iter(rest), s, s) for s in scopes)
        scopes = list(scopes)

        if self.is_nested_import():
            scopes.append(self.get_nested_import(scope))
    else:
        scopes = [ImportPath.GlobalNamespace]
    debug.dbg('after import', scopes)

    evaluate.follow_statement.pop_stmt()
    return scopes

def update(self, force=False):
    if force or self.__should_update():
        debug.log("Headlines should update!")
        self.starttime = time.time()
        feeds = []
        debug.log("{} feeds to update...".format(len(self.feed_urls)))
        feedparser.USER_AGENT = "mlb-led-scoreboard/3.0 +https://github.com/MLB-LED-Scoreboard/mlb-led-scoreboard"
        if len(self.feed_urls) > 0:
            debug.log("Feed URLs found...")
            for idx, url in enumerate(self.feed_urls):
                if idx < HEADLINE_MAX_FEEDS:  # Only parse MAX feeds to prevent potential hangs
                    debug.log("Fetching {}".format(url))
                    f = feedparser.parse(url)
                    try:
                        title = f.feed.title.encode("ascii", "ignore")
                        debug.log("Fetched feed '{}' with {} entries.".format(title, len(f.entries)))
                        feeds.append(f)
                    except AttributeError:
                        debug.warning("There was a problem fetching {}".format(url))
            self.feed_data = feeds

def update(self, force=False):
    if force or self.__should_update():
        debug.log("Weather should update!")
        self.starttime = time.time()
        if self.apikey_valid:
            debug.log("API Key hasn't been flagged as bad yet")
            try:
                self.observation = self.client.weather_at_place(self.location)
                weather = self.observation.get_weather()
                self.temp = weather.get_temperature(self.temperature_unit).get('temp', -99)
                self.wind_speed = weather.get_wind(self.speed_unit).get('speed', -9)
                self.wind_dir = weather.get_wind(self.speed_unit).get('deg', 0)
                self.conditions = weather.get_status()
                self.icon_name = weather.get_weather_icon_name()
                debug.log("Weather: {}; Wind: {}; {} ({})".format(
                    self.temperature_string(), self.wind_string(), self.conditions, self.icon_filename()))
            except pyowm.exceptions.api_response_error.UnauthorizedError:
                debug.warning("[WEATHER] The API key provided doesn't appear to be valid. Please check your config.json.")
                debug.warning("[WEATHER] You can get a free API key by visiting https://home.openweathermap.org/users/sign_up")
                self.apikey_valid = False
            except (pyowm.exceptions.api_call_error.APICallTimeoutError,
                    ConnectTimeoutError, MaxRetryError, NewConnectionError) as e:
                debug.warning("[WEATHER] Fetching weather information failed from a connection issue.")
                debug.log("[WEATHER] Error Message: {}".format(e))
                # Set some placeholder weather info if this is our first weather update
                if self.temp is None:
                    self.temp = -99
                if self.wind_speed is None:
                    self.wind_speed = -9
                if self.wind_dir is None:
                    self.wind_dir = 0
                if self.conditions is None:
                    self.conditions = "Error"
                if self.icon_name is None:
                    self.icon_name = "50d"

def check_rotate_rates(self):
    if not isinstance(self.rotation_rates, dict):
        try:
            rate = float(self.rotation_rates)
            self.rotation_rates = {"live": rate, "final": rate, "pregame": rate}
        except:
            debug.warning("rotation_rates should be a Dict or Float. Using default value. {}".format(DEFAULT_ROTATE_RATES))
            self.rotation_rates = DEFAULT_ROTATE_RATES

    for key, value in list(self.rotation_rates.items()):
        try:
            # Try and cast whatever the user passed into a float
            rate = float(value)
            self.rotation_rates[key] = rate
        except:
            # Use the default rotate rate if it fails
            debug.warning("Unable to convert rotate_rates[\"{}\"] to a Float. Using default value. ({})".format(key, DEFAULT_ROTATE_RATE))
            self.rotation_rates[key] = DEFAULT_ROTATE_RATE

        if self.rotation_rates[key] < MINIMUM_ROTATE_RATE:
            debug.warning("rotate_rates[\"{}\"] is too low. Please set it greater than {}. Using default value. ({})".format(key, MINIMUM_ROTATE_RATE, DEFAULT_ROTATE_RATE))
            self.rotation_rates[key] = DEFAULT_ROTATE_RATE

    # Setup some nice attributes to make sure they all exist
    self.rotation_rates_live = self.rotation_rates.get("live", DEFAULT_ROTATE_RATES["live"])
    self.rotation_rates_final = self.rotation_rates.get("final", DEFAULT_ROTATE_RATES["final"])
    self.rotation_rates_pregame = self.rotation_rates.get("pregame", DEFAULT_ROTATE_RATES["pregame"])

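# Example of the normalization above (hypothetical config input): a bare number in
# config.json such as "rotation": {"rates": 10} becomes
# {"live": 10.0, "final": 10.0, "pregame": 10.0}, while a non-numeric entry such
# as {"live": "fast"} falls back to DEFAULT_ROTATE_RATE for that key.
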
def __next__(self):
    if self.closed:
        raise MultiLevelStopIteration()
    try:
        self.current = next(self.gen)
    except tokenize.TokenError:
        # We just ignore this error; we try to handle it earlier, as well
        # as possible.
        debug.warning('parentheses not closed error')
        return self.__next__()
    except IndentationError:
        # This is an error that tokenize may produce, because the code
        # is not indented as it should be. Here it just ignores this line
        # and restarts the parser.
        # (This is a rather unlikely error message; for normal code,
        # tokenize seems to be pretty tolerant.)
        debug.warning('indentation error on line %s, ignoring it' % self.current[2][0])
        # add the starting line of the last position
        self.line_offset += self.current[2][0]
        self.gen = PushBackIterator(tokenize.generate_tokens(self.readline))
        return self.__next__()

    c = list(self.current)

    # stop if a new class or definition is started at position zero.
    breaks = ['def', 'class', '@']
    if self.stop_on_scope and c[1] in breaks and c[2][1] == 0:
        if self.first_scope:
            self.closed = True
            raise MultiLevelStopIteration()
        elif c[1] != '@':
            self.first_scope = True

    c[2] = self.line_offset + c[2][0], c[2][1]
    c[3] = self.line_offset + c[3][0], c[3][1]
    return c

def cleanup(self):
    """perform cleanup if necessary"""
    self.logfile.close()
    if self.proc is None or self.proc.poll() == 0:
        return  # clean exit
    if self.proc.returncode:
        debug.warning("httperf: SSH to %s exited with error %d" % (self.host, self.proc.returncode))
    else:
        # kill SSH if still up
        debug.warning("httperf: killing SSH child for %s" % self.host)
        self.proc.terminate()
        self.proc.wait()
    # run a remote killall to get rid of any errant httperfs
    debug.verbose("killing any errant httperfs on %s" % self.host)
    p = self._launchssh("killall -q %s" % self.httperf_path)
    retcode = p.wait()
    if retcode != 0:
        debug.warning("failed to killall httperf on %s!" % self.host)

def RemoveShipFromFG(fgname, faction, type, numkill=1, landed=0):
    key = MakeFGKey(fgname, faction)
    leg = Director.getSaveStringLength(ccp, key)
    for i in range(ShipListOffset() + 1, leg, PerShipDataSize()):
        if Director.getSaveString(ccp, key, i - 1) == str(type):
            numships = 0
            numlandedships = 0
            try:
                numships = int(Director.getSaveString(ccp, key, i))
                numlandedships = int(Director.getSaveString(ccp, key, i + 1))
            except:
                debug.warning("unable to get savestring " + str(i) + " from FG " + fgname + " " + faction + " " + type)
            if numships > numkill:
                numships -= numkill
                if numships < numlandedships:
                    if landed == 0:
                        debug.info('trying to remove launched ship ' + type + ' but all are landed')
                        landed = 1
                        return 0  # failure
                Director.putSaveString(ccp, key, i, str(numships))
                if landed and numlandedships > 0:
                    Director.putSaveString(ccp, key, i + 1, str(numlandedships - numkill))
            else:
                numkill = numships
                numships = 0
                # erasing at a fixed index shifts the remaining per-ship fields down
                for j in range(i - 1, i + PerShipDataSize() - 1):
                    Director.eraseSaveString(ccp, key, i - 1)
            if numships >= 0:
                try:
                    totalnumships = int(Director.getSaveString(ccp, key, 0))
                    totalnumships -= numkill
                    if totalnumships >= 0:
                        Director.putSaveString(ccp, key, 0, str(totalnumships))
                        if totalnumships == 0:
                            DeleteFG(fgname, faction)
                    else:
                        debug.warning('Warning: removing too many ships')
                except:
                    debug.warning('Warning: flight record ' + fgname + ' corrupt')
            return numkill
    debug.info('cannot find ship to delete in ' + faction + ' fg ' + fgname)
    return 0

import socket, os
import debug


class BaseSite(object):
    def __getattr__(self, name):
        # use type(self).__name__ here: self.__name__ would re-enter
        # __getattr__ and recurse
        raise AttributeError('site configuration (%s) has no parameter named %s'
                             % (type(self).__name__, name))

    def get_load_generator(self):
        """Returns a (username, hostname/IP) tuple for a host to which we can
        SSH to run network load generators. May be called multiple times."""
        raise NotImplementedError


# this refers to the singleton site instance after importing the correct module
site = None

# Are we at ETH?
if os.path.isdir('/home/netos') and socket.getfqdn().endswith('.ethz.ch'):
    import eth
elif socket.getfqdn().endswith('.europe.corp.microsoft.com'):
    import msrc
elif socket.getfqdn().endswith('triangle') or socket.getfqdn().endswith('.cs.washington.edu'):
    import uw
else:
    debug.warning("unable to guess site, using ETH... expect breakage!")
    import eth


# shortcut to look up a configuration parameter for the current site
def get(name):
    return getattr(site, name)

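# Usage sketch: other snippets in this collection read site parameters through the
# get() shortcut above, assuming this module is imported as siteconfig (as those
# snippets do), e.g.:
#
#   import siteconfig
#   ipbench_path = siteconfig.get('IPBENCH_PATH')
#   ssh_args = siteconfig.get('SSH_ARGS').split()
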
def __rackpower(self, arg):
    try:
        debug.checkcmd([RACKPOWER, arg, self.get_machine_name()])
    except subprocess.CalledProcessError:
        debug.warning("rackpower %s %s failed" % (arg, self.get_machine_name()))

def _get_path_until_cursor(self, start_pos=None):
    def fetch_line():
        line = self.get_line(self._line_temp)
        if self._is_first:
            self._is_first = False
            self._line_length = self._column_temp
            line = line[:self._column_temp]
        else:
            self._line_length = len(line)
            line = line + '\n'
        # add lines with a backslash at the end
        while 1:
            self._line_temp -= 1
            last_line = self.get_line(self._line_temp)
            if last_line and last_line[-1] == '\\':
                line = last_line[:-1] + ' ' + line
            else:
                break
        return line[::-1]

    self._is_first = True
    if start_pos is None:
        self._line_temp = self.position[0]
        self._column_temp = self.position[1]
    else:
        self._line_temp, self._column_temp = start_pos

    open_brackets = ['(', '[', '{']
    close_brackets = [')', ']', '}']

    gen = tokenize.generate_tokens(fetch_line)
    string = ''
    level = 0
    force_point = False
    last_type = None
    try:
        for token_type, tok, start, end, line in gen:
            # print 'tok', token_type, tok, force_point
            if last_type == token_type == tokenize.NAME:
                string += ' '

            if level > 0:
                if tok in close_brackets:
                    level += 1
                if tok in open_brackets:
                    level -= 1
            elif tok == '.':
                force_point = False
            elif force_point:
                # it is reversed, therefore a number is getting recognized
                # as a floating point number
                if token_type == tokenize.NUMBER and tok[0] == '.':
                    force_point = False
                else:
                    break
            elif tok in close_brackets:
                level += 1
            elif token_type in [tokenize.NAME, tokenize.STRING]:
                force_point = True
            elif token_type == tokenize.NUMBER:
                pass
            else:
                break

            self._column_temp = self._line_length - end[1]
            string += tok
            last_type = token_type
    except tokenize.TokenError:
        debug.warning("Tokenize couldn't finish", sys.exc_info)

    # string can still contain spaces at the end
    return string[::-1].strip()

def _generate_code(scope, mixin_funcs={}, depth=0):
    """
    Generate a string, which uses python syntax as an input to the
    PyFuzzyParser.
    """
    def get_doc(obj, indent=False):
        doc = inspect.getdoc(obj)
        if doc:
            doc = ('r"""\n%s\n"""\n' % doc)
            if indent:
                doc = common.indent_block(doc)
            return doc
        return ''

    def is_in_base_classes(cls, name, comparison):
        """ Base classes may contain the exact same object """
        if name in mixin_funcs:
            return False
        try:
            mro = cls.mro()
        except TypeError:
            # this happens, if cls == type
            return False
        for base in mro[1:]:
            try:
                attr = getattr(base, name)
            except AttributeError:
                continue
            if attr == comparison:
                return True
        return False

    def get_scope_objects(names):
        """
        Looks for the names defined with dir() in an object and divides
        them into different object types.
        """
        classes = {}
        funcs = {}
        stmts = {}
        members = {}
        for n in names:
            try:
                # this has a builtin_function_or_method
                exe = getattr(scope, n)
            except AttributeError:
                # happens e.g. in properties of
                # PyQt4.QtGui.QStyleOptionComboBox.currentText
                # -> just set it to None
                members[n] = None
            else:
                if inspect.isclass(scope):
                    if is_in_base_classes(scope, n, exe):
                        continue
                if inspect.isbuiltin(exe) or inspect.ismethod(exe) \
                        or inspect.ismethoddescriptor(exe):
                    funcs[n] = exe
                elif inspect.isclass(exe):
                    classes[n] = exe
                elif inspect.ismemberdescriptor(exe):
                    members[n] = exe
                else:
                    stmts[n] = exe
        return classes, funcs, stmts, members

    code = ''
    if inspect.ismodule(scope):
        # generate comment where the code's from.
        try:
            path = scope.__file__
        except AttributeError:
            path = '?'
        code += '# Generated module %s from %s\n' % (scope.__name__, path)
    code += get_doc(scope)

    names = set(dir(scope)) - set(['__file__', '__name__', '__doc__',
                                   '__path__', '__package__']) \
        | set(['mro'])

    classes, funcs, stmts, members = get_scope_objects(names)

    # classes
    for name, cl in classes.items():
        bases = (c.__name__ for c in cl.__bases__)
        code += 'class %s(%s):\n' % (name, ','.join(bases))
        if depth == 0:
            try:
                mixin = mixin_funcs[name]
            except KeyError:
                mixin = {}
            cl_code = _generate_code(cl, mixin, depth + 1)
            code += common.indent_block(cl_code)
        code += '\n'

    # functions
    for name, func in funcs.items():
        params, ret = parse_function_doc(func)
        if depth > 0:
            params = 'self, ' + params
        doc_str = get_doc(func, indent=True)
        try:
            mixin = mixin_funcs[name]
        except KeyError:
            # normal code generation
            code += 'def %s(%s):\n' % (name, params)
            code += doc_str
            code += common.indent_block('%s\n\n' % ret)
        else:
            # generation of code with mixins
            # the parser only supports basic functions with a newline after
            # the double dots
            # find doc_str place
            try:
                pos = re.search(r'\):\s*\n', mixin).end()
            except TypeError:
                # pypy uses a different reversed builtin
                if name == 'reversed':
                    mixin = 'def reversed(sequence):\n' \
                            '    for i in self.__sequence: yield i'
                    pos = 24
                else:
                    debug.warning('mixin trouble in pypy: %s', name)
                    raise
            if pos is None:
                raise Exception("Builtin function not parsed correctly")
            code += mixin[:pos] + doc_str + mixin[pos:]

    # class members (functions) properties?
    for name, func in members.items():
        # recursion problem in properties TODO remove
        if name in ['fget', 'fset', 'fdel']:
            continue
        ret = 'pass'
        code += '@property\ndef %s(self):\n' % (name)
        code += common.indent_block(get_doc(func) + '%s\n\n' % ret)

    # variables
    for name, value in stmts.items():
        if is_py3k:
            file_type = io.TextIOWrapper
        else:
            file_type = types.FileType
        if type(value) == file_type:
            value = 'open()'
        elif name == 'None':
            value = ''
        elif type(value).__name__ in ['int', 'bool', 'float',
                                      'dict', 'list', 'tuple']:
            value = repr(value)
        else:
            # get the type, if the type is not simple.
            mod = type(value).__module__
            value = type(value).__name__ + '()'
            if mod != '__builtin__':
                value = '%s.%s' % (mod, value)
        code += '%s = %s\n' % (name, value)

    if depth == 0:
        # with open('writeout.py', 'w') as f:
        #     f.write(code)
        # import sys
        # sys.stdout.write(code)
        # exit()
        pass
    return code

def __init__(self, filename_base, width, height):
    json = self.__get_config(filename_base)

    # Preferred Teams/Divisions
    self.preferred_teams = json["preferred"]["teams"]
    self.preferred_divisions = json["preferred"]["divisions"]

    # News Ticker
    self.news_ticker_team_offday = json["news_ticker"]["team_offday"]
    self.news_ticker_always_display = json["news_ticker"]["always_display"]
    self.news_ticker_preferred_teams = json["news_ticker"]["preferred_teams"]
    self.news_ticker_traderumors = json["news_ticker"]["traderumors"]
    self.news_ticker_mlb_news = json["news_ticker"]["mlb_news"]
    self.news_ticker_countdowns = json["news_ticker"]["countdowns"]
    self.news_ticker_date = json["news_ticker"]["date"]
    self.news_ticker_date_format = json["news_ticker"]["date_format"]

    # Display Standings
    self.standings_team_offday = json["standings"]["team_offday"]
    self.standings_mlb_offday = json["standings"]["mlb_offday"]
    self.standings_always_display = json["standings"]["always_display"]
    self.standings_display_offday = False

    # Rotation
    self.rotation_enabled = json["rotation"]["enabled"]
    self.rotation_scroll_until_finished = json["rotation"]["scroll_until_finished"]
    self.rotation_only_preferred = json["rotation"]["only_preferred"]
    self.rotation_rates = json["rotation"]["rates"]
    self.rotation_preferred_team_live_enabled = json["rotation"]["while_preferred_team_live"]["enabled"]
    self.rotation_preferred_team_live_mid_inning = json["rotation"]["while_preferred_team_live"]["during_inning_breaks"]

    # Weather
    self.weather_apikey = json["weather"]["apikey"]
    self.weather_location = json["weather"]["location"]
    self.weather_metric_units = json["weather"]["metric_units"]

    # Misc config options
    self.end_of_day = json["end_of_day"]
    self.full_team_names = json["full_team_names"]
    self.debug = json["debug"]
    self.demo_date = json["demo_date"]

    # Make sure the scrolling speed setting is in range so we don't crash
    try:
        self.scrolling_speed = SCROLLING_SPEEDS[json["scrolling_speed"]]
    except:
        debug.warning("Scrolling speed should be an integer between 0 and 4. Using default value of {}".format(DEFAULT_SCROLLING_SPEED))
        self.scrolling_speed = SCROLLING_SPEEDS[DEFAULT_SCROLLING_SPEED]

    # Get the layout info
    json = self.__get_layout(width, height)
    self.layout = Layout(json, width, height)

    # Store color information
    json = self.__get_colors("teams")
    self.team_colors = Color(json)
    json = self.__get_colors("scoreboard")
    self.scoreboard_colors = Color(json)

    # Check that the preferred teams and divisions are a list or a string
    self.check_preferred_teams()
    self.check_preferred_divisions()

    # Check the rotation_rates to make sure they're valid and not silly
    self.check_rotate_rates()