def _get_extern_stats(self, extern_listing):
    """Extract the external file reference from *extern_listing*.

    extern_listing is a list of strings; we look for Path and File
    parameters whose values are double-quoted on the matching lines.

    Side effect: sets self.is_ev_file to True when the file extension
    matches the '(?:ev|un)[ao]' pattern.

    Returns the combined path + file name, or '' when no Path entry
    (or no File entry) is found.
    """
    PathName, FileName, self.is_ev_file = '', '', False
    g = util.m_grep(extern_listing)
    g.grep('(File|Path)')
    path_index = util.find_index(g.m_groups, 'Path')
    file_index = util.find_index(g.m_groups, 'File')
    # BUG FIX: previously only path_index was checked; when 'File' was
    # absent, file_index == -1 silently indexed the LAST matched line.
    if path_index > -1 and file_index > -1:
        PathName = g.lines[path_index].split('"')[1] + os.sep
        FileName = PathName + g.lines[file_index].split('"')[1]
        self.is_ev_file = len(util.grep_list(os.path.splitext(FileName)[1], '(?:ev|un)[ao]')) > 0
    return FileName
def start():
    """Load the leaks ("vazamentos") spreadsheet, resolve its header column
    indices into module globals, then process the responses."""
    global ADDRESS_VAZAMENTOS_INDEX, DISTRICT_VAZAMENTOS_INDEX, STATE_VAZAMENTOS_INDEX, CITY_VAZAMENTOS_INDEX, LAT_LNG_VAZAMENTOS_INDEX, planilha_vazamentos
    # BUG FIX: the return value of ler_planilha() was previously discarded,
    # leaving the global planilha_vazamentos unset/stale before it is read
    # below.  setup(app) elsewhere in this file assigns it the same way.
    planilha_vazamentos = ler_planilha()
    cabecalho = planilha_vazamentos.row_values(1)
    ADDRESS_VAZAMENTOS_INDEX = find_index(cabecalho, ADDRESS_VAZAMENTOS_HEADER)
    DISTRICT_VAZAMENTOS_INDEX = find_index(cabecalho, DISTRICT_VAZAMENTOS_HEADER)
    STATE_VAZAMENTOS_INDEX = find_index(cabecalho, STATE_VAZAMENTOS_HEADER)
    CITY_VAZAMENTOS_INDEX = find_index(cabecalho, CITY_VAZAMENTOS_HEADER)
    LAT_LNG_VAZAMENTOS_INDEX = find_index(cabecalho, LAT_LNG_VAZAMENTOS_HEADER)
    ler_respostas()
def _getLevelSpsSet(lvl_dict, contents, eq_start, eq_stop, specs_level_dict):
    """Parse one EQNSET slice of a levels file into lvl_dict (in place).

    :param lvl_dict: dict populated in place; gains a "sub_sections" list and,
        per sub-section type, a dict.  SPECSET entries map spec number ->
        {"specset_name": ..., "SPECS": {specname: {...}}}.
    :param contents: full list of file lines.
    :param eq_start: index of the first line of this EQNSET block.
    :param eq_stop: index one past the last line of this EQNSET block.
    :param specs_level_dict: pattern dict; "subSections" greps sub-section headers.

    NOTE(review): Python 2 only (dict.has_key).  A duplicate definition of
    this function appears elsewhere in this file.
    """
    # Only used by the commented-out slice-based parser below; the active
    # code parses by whitespace-splitting instead.
    spec_hdr_keys = ["specname", "actual", "minimum", "maximum", "units"]
    eq_contents = contents[eq_start:eq_stop]
    eq_g = util.m_re(eq_contents)
    eq_g.grep(specs_level_dict["subSections"])
    # 1-based grep coordinates -> 0-based indices; append the end sentinel.
    eq_indices = [int(float(a_num) - 1) for a_num in eq_g.coordinates] + [len(eq_contents)]
    lvl_dict["sub_sections"] = list(
        set([a_row.split()[0] for a_row in eq_g.m_groups]))
    for sub_section, sub_section_start, sub_section_stop in zip(
            eq_g.m_groups, eq_indices[:-1], eq_indices[1:]):
        sub_section_type = sub_section.strip().split()[0]
        sub_section_contents = eq_contents[sub_section_start + 1:sub_section_stop]
        if not lvl_dict.has_key(sub_section_type):
            lvl_dict[sub_section_type] = {}
        if re.search("SPECSET", sub_section_type):
            # e.g.  SPECSET 1 "nominal"  -> num=1, name=nominal (name optional)
            m = re.search(
                "^\s*SPECSET\s+(?P<num>\d+)\s*\"?(?P<name>[\w\s\-]+)?\"?",
                sub_section)
            m_dict = m.groupdict()
            spec_num, spec_name = int(m_dict["num"]), m_dict["name"]
            lvl_dict[sub_section_type][spec_num] = {
                "specset_name": spec_name,
                "SPECS": {}
            }
            hdr_i = util.find_index(sub_section_contents,
                                    "# SPECNAME\s+\*+ACTUAL")
            # hdr_slices is only consumed by the commented-out code below.
            hdr_slices = getSpecHeaderSpan(sub_section_contents[hdr_i])
            # Strip trailing '#' comments, then drop blank lines.
            spec_lines = util.remove_from_list([
                re.sub("\s*#\s*.*$", "", a_row)
                for a_row in sub_section_contents
            ], "^\s*$")
            for a_line in spec_lines:
                # Units are the first bracketed token, e.g. "[ V ]" -> "V".
                units = re.findall(r"\[.*?\]", a_line)[0].strip('[] ')
                a_line = a_line[:a_line.find('[')]
                parts = a_line.split()
                name = parts[0]
                actual = parts[1]
                # minimum/maximum columns are optional.
                if len(parts) > 2:
                    minVal = parts[2]
                else:
                    minVal = ''
                    maxVal = ''
                if len(parts) > 3:
                    maxVal = parts[3]
                else:
                    maxVal = ''
                #a_dict = dict([
                #    (a_key, re.sub("[\[\]]", "", a_line[a_slice]).strip())
                #    for a_key,a_slice in zip(spec_hdr_keys,hdr_slices)
                #])
                #key_list = list(set(a_dict.iterkeys()) - {"specname"})
                # NOTE(review): 'UNUTS' looks like a typo for 'UNITS', but it
                # is a runtime dict key — confirm no consumer depends on it
                # before renaming.
                lvl_dict[sub_section_type][spec_num]["SPECS"][name] =\
                    {'actual': actual, 'minimum':minVal, 'maximum': maxVal, 'UNUTS': units}
                # dict([
                #   (a_key, a_dict[a_key]) for a_key in key_list
                #])
    pass
def pretty_format(self, pin_list):
    '''add spacer after each pin passed in from pin_list in each vector of channel data'''
    # Column of each requested pin within the signal header; walked in
    # reverse so earlier insertions cannot shift later insertion points.
    self.pin_sequence = [util.find_index(self.signal_header, '^' + aPin + '$') for aPin in pin_list]
    self.pin_sequence.reverse()
    for idx in self.vector_indices:
        fields = self.vector_list[idx].split('*')
        # Flatten the pin-state field (strip embedded blanks/tabs).
        states = list(fields[1].replace(' ', '').replace('\t', ''))
        for col in self.pin_sequence:
            states.insert(col + 1, ' ')
        self.vector_list[idx] = fields[0] + '*' + ''.join(states) + '*' + '*'.join(fields[2:])
    # Write the reformatted vectors back into the full line buffer.
    self.all_lines[self.c.start_pts[0]:self.c.end_pts[0]] = self.vector_list
def shuffle_signal_header(self, new_signal_header):
    """Reorder every vector's pin-state characters to match new_signal_header,
    then adopt the new header and refresh the name->column lookup."""
    # Destination column of each current pin inside the new header.
    self.pin_sequence = [util.find_index(new_signal_header, '^' + pin + '$') for pin in self.signal_header]
    for idx in self.vector_indices:
        pieces = self.vector_list[idx].split('*')
        #pull out vector line and remove spaces between alias characters
        flattened = pieces[1].replace(' ', '').replace('\t', '')
        self.s = util.Scramble(flattened, self.pin_sequence)
        self.vector_list[idx] = pieces[0] + '*' + ''.join(self.s.scrambled) + '*' + '*'.join(pieces[2:])
    self.signal_header = new_signal_header
    self.all_lines[self.c.start_pts[0]:self.c.end_pts[0]] = self.vector_list
    # Rebuild the signal-name -> column-index map for the new ordering.
    self.sig_hdr_dict = dict([(name, col) for col, name in enumerate(self.signal_header)])
def determineSetups(testflow_file): """ get levels and timing file reference(s) from setups in testflow file :param testflow_file""" # testflow section fields tfSectionFields = [ "information", "declarations", "implicit_declarations", "flags", "testmethodparameters", "testmethodlimits", "testmethods", "test_suites", "bin_disconnect", "test_flow", "binning", "context", "hardware_bin_descriptions" ] contextDirDict = { "context_config_file": "configuration", "context_levels_file": "levels", "context_timing_file": "timing", "context_vector_file": "vectors", "context_analog_control_file": "analog_control", "context_routing_file": "routing", "context_testtable_file": "testtable", "context_channel_attrib_file": "ch_attributes" } testflowPath = os.path.dirname(testflow_file) devicePath = testflowPath.rpartition(os.path.sep)[0] f = util.FileUtils(testflow_file, True) setupIndex = util.find_index(f.contents, "^\s*setup\s*:\s*[\.\\/\w]+") setupsDict = {} if setupIndex > -1: setupFn = os.path.join(testflowPath, f.contents[setupIndex].split(":")[-1].strip()) f = util.FileUtils(setupFn, True) startPt = util.find_index(f.contents, "^\s*context\\b") stopPt = startPt + util.find_index(f.contents[startPt:], "^\s*end\\b") chkList = [ ",".join([aWord.strip() for aWord in aRow.strip().split("=") ]).replace(";", "").replace('"', '') for aRow in f.contents[startPt:stopPt] if util.in_string(aRow, "=") ] setupsDict = dict([tuple(aRow.split(",")) for aRow in chkList]) for k, v in setupsDict.iteritems(): setupsDict[k] = os.path.join(devicePath, contextDirDict[k], v) return setupsDict
def _getLevelSpsSet(lvl_dict, contents, eq_start, eq_stop, specs_level_dict):
    """Parse one EQNSET slice of a levels file into lvl_dict (in place).

    lvl_dict gains a "sub_sections" list and, per sub-section type, a dict;
    SPECSET entries map spec number -> {"specset_name", "SPECS"}.
    contents is the full list of file lines; [eq_start:eq_stop) bounds this
    EQNSET block.  specs_level_dict["subSections"] is the grep pattern for
    sub-section header lines.

    NOTE(review): Python 2 only (dict.has_key).  This is a duplicate of an
    identically-named function defined earlier in this file.
    """
    # Only referenced by the commented-out slice-based parser below.
    spec_hdr_keys = ["specname", "actual", "minimum", "maximum", "units"]
    eq_contents = contents[eq_start:eq_stop]
    eq_g = util.m_re(eq_contents)
    eq_g.grep(specs_level_dict["subSections"])
    # 1-based grep coordinates -> 0-based indices, plus an end sentinel.
    eq_indices = [int(float(a_num)-1) for a_num in eq_g.coordinates]+[len(eq_contents)]
    lvl_dict["sub_sections"] = list(set([a_row.split()[0] for a_row in eq_g.m_groups]))
    for sub_section, sub_section_start, sub_section_stop in zip(eq_g.m_groups, eq_indices[:-1], eq_indices[1:]):
        sub_section_type = sub_section.strip().split()[0]
        sub_section_contents = eq_contents[sub_section_start+1:sub_section_stop]
        if not lvl_dict.has_key(sub_section_type):
            lvl_dict[sub_section_type] = {}
        if re.search("SPECSET", sub_section_type):
            # e.g.  SPECSET 1 "nominal"  (name is optional)
            m = re.search("^\s*SPECSET\s+(?P<num>\d+)\s*\"?(?P<name>[\w\s\-]+)?\"?", sub_section)
            m_dict = m.groupdict()
            spec_num, spec_name = int(m_dict["num"]), m_dict["name"]
            lvl_dict[sub_section_type][spec_num] = {"specset_name": spec_name, "SPECS":{}}
            hdr_i = util.find_index(sub_section_contents, "# SPECNAME\s+\*+ACTUAL")
            # hdr_slices is only consumed by the commented-out code below.
            hdr_slices = getSpecHeaderSpan(sub_section_contents[hdr_i])
            # Strip trailing '#' comments, then drop blank lines.
            spec_lines = util.remove_from_list(
                [re.sub("\s*#\s*.*$", "", a_row) for a_row in sub_section_contents],
                "^\s*$"
            )
            for a_line in spec_lines:
                # Units are the first bracketed token, e.g. "[ V ]" -> "V".
                units = re.findall(r"\[.*?\]",a_line)[0].strip('[] ')
                a_line = a_line[:a_line.find('[')]
                parts = a_line.split()
                name = parts[0]
                actual = parts[1]
                # minimum/maximum columns are optional.
                if len(parts)>2:
                    minVal = parts[2]
                else:
                    minVal = ''
                    maxVal = ''
                if len(parts)>3:
                    maxVal = parts[3]
                else:
                    maxVal = ''
                #a_dict = dict([
                #    (a_key, re.sub("[\[\]]", "", a_line[a_slice]).strip())
                #    for a_key,a_slice in zip(spec_hdr_keys,hdr_slices)
                #])
                #key_list = list(set(a_dict.iterkeys()) - {"specname"})
                # NOTE(review): 'UNUTS' looks like a typo for 'UNITS', but it
                # is a runtime dict key — confirm consumers before renaming.
                lvl_dict[sub_section_type][spec_num]["SPECS"][name] =\
                    {'actual': actual, 'minimum':minVal, 'maximum': maxVal, 'UNUTS': units}
                # dict([
                #   (a_key, a_dict[a_key]) for a_key in key_list
                #])
    pass
def setup():
    """Resolve the worksheet header column indices into module globals and
    launch the notification background thread."""
    global ADDRESS_INDEX, CEP_INDEX, NUMBER_INDEX, NUMBER_FOR_CEP_INDEX, DISTRICT_INDEX, STATE_INDEX, CITY_INDEX, LAT_LNG_INDEX
    sheet = read_worksheet()
    header_row = sheet.row_values(1)
    # Locate each known header within row 1 of the worksheet.
    ADDRESS_INDEX = find_index(header_row, ADDRESS_HEADER)
    CEP_INDEX = find_index(header_row, CEP_HEADER)
    NUMBER_INDEX = find_index(header_row, NUMBER_HEADER)
    NUMBER_FOR_CEP_INDEX = find_index(header_row, NUMBER_FOR_CEP_HEADER)
    DISTRICT_INDEX = find_index(header_row, DISTRICT_HEADER)
    STATE_INDEX = find_index(header_row, STATE_HEADER)
    CITY_INDEX = find_index(header_row, CITY_HEADER)
    LAT_LNG_INDEX = find_index(header_row, LAT_LNG_HEADER)
    notifier = Thread(target=notification_thread)
    notifier.start()
def dict_2_list(self,aDict):
    "expecting only strings for dictionary key and value(s)"
    # Flattens aDict into a display list: an 'eva' key (if present) is
    # promoted to the top with '+ ' prefix, remaining keys get '++' rows,
    # and each value row is prefixed with the current spacer string.
    # NOTE(review): Python 2 only — relies on dict.keys() returning a
    # poppable list.
    aList=[]
    spacer='|------> '
    ListOfKeys = aDict.keys()
    EvaIndex = util.find_index(ListOfKeys,'eva')
    if EvaIndex >= 0:
        # Pull the 'eva' key out so it is emitted first.
        EvaKey = ListOfKeys.pop(EvaIndex)
        aList.append('+ '+EvaKey)
        spacer = '|-----> '
        for value in aDict[EvaKey]:
            aList.append(spacer+value)
        # Deeper indent for the values of the remaining keys.
        # NOTE(review): placement reconstructed from whitespace-mangled
        # source — confirm this assignment sits after (not inside) the
        # loop above.
        spacer=' |-----> '
    for aKey in ListOfKeys:
        aList.append(' ++ '+aKey)
        for value in aDict[aKey]:
            aList.append(spacer+value)
    return(aList)
def _getTimingGlobalSpecVars(t_dict, eq_contents):
    """ determine if we have any global variables in a timing SPECIFICATION object

    Populates t_dict in place: specname -> {"actual", "minimum", "maximum",
    "units"} parsed from the fixed-width columns of the '# SPECNAME ... ACTUAL'
    header table, after removing any '{ ... }' sub-blocks (e.g. SYNC blocks).

    NOTE(review): Python 2 only (xrange, dict.iterkeys).  A duplicate of this
    function appears elsewhere in this file.
    """
    spec_hdr_keys = ["specname", "actual", "minimum", "maximum", "units"]
    eq_g = util.m_re(eq_contents)
    eq_g.grep("[{}]")
    #pull out {} blocks, such as SYNC { ..... }
    if eq_g.pattern_count >= 2:
        start_pts, stop_pts = util.find_blocks(eq_g.m_groups, "{", "}")
        # (start-2, stop) line ranges for each brace block, 0-based.
        i_list = [(int(float(eq_g.coordinates[s_a]) - 2),
                   int(float(eq_g.coordinates[s_b])))
                  for s_a, s_b in zip(start_pts, stop_pts)]
        del_indices = []
        for (x, y) in i_list:
            del_indices += range(x, y)
        # Keep only lines outside the brace blocks.
        # NOTE(review): iterating a set of ints here — ordering of the
        # surviving lines relies on CPython's int-set behavior; confirm.
        eq_contents = [
            eq_contents[i]
            for i in list(set(xrange(len(eq_contents))) - set(del_indices))
        ]
        eq_g = util.m_re(eq_contents)
    else:
        eq_g.clear_cache()
    hdr_i = util.find_index(eq_contents, "# SPECNAME\s+\*+ACTUAL")
    if hdr_i > -1:
        # Column slices derived from the header line's field positions.
        hdr_slices = getSpecHeaderSpan(eq_contents[hdr_i])
        # Strip trailing '#' comments, then drop blank lines.
        spec_lines = util.remove_from_list([
            re.sub("\s*#\s*.*$", "", a_row)
            for a_row in eq_contents[hdr_i + 1:]
        ], "^\s*$")
        for a_line in spec_lines:
            # Cut each line by column span; strip brackets around units.
            a_dict = dict([
                (a_key, re.sub("[\[\]]", "", a_line[a_slice]).strip())
                for a_key, a_slice in zip(spec_hdr_keys, hdr_slices)
            ])
            key_list = list(set(a_dict.iterkeys()) - {"specname"})
            t_dict[a_dict["specname"]] = dict([(a_key, a_dict[a_key])
                                               for a_key in key_list])
    pass
def _getTimingGlobalSpecVars(t_dict, eq_contents):
    """ determine if we have any global variables in a timing SPECIFICATION object

    Fills t_dict in place: specname -> {"actual", "minimum", "maximum",
    "units"}, read from the fixed-width table under the
    '# SPECNAME ... ACTUAL' header, after excising '{ ... }' sub-blocks.

    NOTE(review): Python 2 only (xrange, dict.iterkeys).  This duplicates an
    identically-named function defined earlier in this file.
    """
    spec_hdr_keys = ["specname", "actual", "minimum", "maximum", "units"]
    eq_g = util.m_re(eq_contents)
    eq_g.grep("[{}]")
    #pull out {} blocks, such as SYNC { ..... }
    if eq_g.pattern_count >= 2:
        start_pts,stop_pts = util.find_blocks(eq_g.m_groups, "{", "}")
        # (start-2, stop) 0-based line ranges covering each brace block.
        i_list = [ (int(float(eq_g.coordinates[s_a])-2), int(float(eq_g.coordinates[s_b]))) for s_a,s_b in zip(start_pts, stop_pts) ]
        del_indices=[]
        for (x,y) in i_list:
            del_indices += range(x,y)
        # Drop the brace-block lines.
        # NOTE(review): iteration order of a set of ints is relied on to
        # keep the remaining lines in file order — confirm.
        eq_contents = [ eq_contents[i] for i in list(set(xrange(len(eq_contents))) - set(del_indices)) ]
        eq_g = util.m_re(eq_contents)
    else:
        eq_g.clear_cache()
    hdr_i = util.find_index(eq_contents, "# SPECNAME\s+\*+ACTUAL")
    if hdr_i > -1:
        # Column slices derived from the header line's field positions.
        hdr_slices = getSpecHeaderSpan(eq_contents[hdr_i])
        # Strip trailing '#' comments, then drop blank lines.
        spec_lines = util.remove_from_list(
            [re.sub("\s*#\s*.*$", "", a_row) for a_row in eq_contents[hdr_i+1:]],
            "^\s*$"
        )
        for a_line in spec_lines:
            # Slice each data line by column span; strip [] around units.
            a_dict = dict([
                (a_key, re.sub("[\[\]]", "", a_line[a_slice]).strip())
                for a_key,a_slice in zip(spec_hdr_keys,hdr_slices)
            ])
            key_list = list(set(a_dict.iterkeys()) - {"specname"})
            t_dict[a_dict["specname"]] = dict([(a_key, a_dict[a_key]) for a_key in key_list])
    pass
def getTiming(timing_file, spec_timing_groups, full_pin_list, pin_group_dict,
              ref_eqn_dict):
    """ ref_eqn_dict is a running copy of the ref_dict["EQN"] dictionary, and is
    treated as "read-only", with the sole purpose as a reference for the SPEC
    parameters that get determined in the ref_dict["SPS"] dictionary.

    Parses a timing file into EQN (equation), SPS (spec) and WVT blocks.

    NOTE(review): Python 2 only (print statements, `except KeyError, e`).
    ref_dict is built but never returned by the visible code — confirm
    whether the mutation of its nested dicts is the intended output.
    """
    ref_dict = {"EQN": {}, "SPS": {}, "WVT": {}}
    f = util.FileUtils(timing_file, True)
    g = util.m_re(f.contents)
    g.grep(spec_timing_groups["topLevel"])
    # with timing, the eqnset is broken down into two sets: Equation(EQN) and Spec(SPS)
    start_pts, stop_pts = util.find_blocks(g.m_groups,
                                           spec_timing_groups["startClause"],
                                           spec_timing_groups["stopClause"])
    for start, stop in zip(start_pts, stop_pts):
        # Lines of this top-level block (grep coordinates are 1-based).
        contents = g.allLines[int(float(g.coordinates[start]) -
                                  1):int(float(g.coordinates[stop]) - 1)]
        if util.in_string(g.lines[start], "EQN"):
            timing_key = "EQN"
        elif util.in_string(g.lines[start], "WVT"):
            timing_key = "WVT"
            # NOTE(review): this break abandons ALL remaining blocks, not
            # just the WVT one — WVT handling appears unimplemented.
            break
        else:
            timing_key = "SPS"
            b = util.m_re(contents)
            b.grep(spec_timing_groups["SPS"]["SPECIFICATION"])
            #-----------------------------------------------------------------------------------------------------------
            # rlogan .. 26apr2016 wrapped in try/catch to display offending code for OMAP5 FPC
            # NOTE(review): b.m_groups[0] on an empty match list raises
            # IndexError, not KeyError — this handler may never fire; confirm.
            try:
                specName = re.sub("^\s*SPECIFICATION\s+\"|\"", "",
                                  b.m_groups[0]).strip()
            except KeyError:
                import sys
                sys.exit(contents)
            #-----------------------------------------------------------------------------------------------------------
            # NOTE(review): re-assigning (not updating) here means only the
            # last SPECIFICATION block survives in ref_dict["SPS"].
            ref_dict["SPS"] = {specName: {"GLOBALS": {}}}
        timing_dict = spec_timing_groups[timing_key]
        b = util.m_re(contents)
        # Strip patterns configured for this block type, then blank lines.
        for remove_expr in timing_dict["remove"]:
            b.sub(remove_expr, "")
        contents = util.remove_from_list(contents, "^\s*$")
        b = util.m_re(contents)
        b.grep(timing_dict["topLevel"])
        # 0-based start index of each EQNSET within this block.
        i_list = [int(float(a_num) - 1) for a_num in b.coordinates]
        if len(i_list) > 1:
            eqn_set_indices = [
                (a, b) for a, b in zip(i_list, i_list[1:] + [len(contents)])
            ]
        else:
            eqn_set_indices = [(i_list[0], len(contents))]
        if timing_key == "SPS":
            # let's check for globals (lines between '{' and first EQNSET)
            _getTimingGlobalSpecVars(
                ref_dict[timing_key][specName]["GLOBALS"],
                contents[util.find_index(contents, "{") + 1:i_list[0]])
        for (eq_start, eq_stop) in eqn_set_indices:
            # e.g.  EQNSET 1 "name"  — name is optional.
            eq_m = re.search(
                "^\s*EQNSET\s+(?P<eq_num>\d+)\s*\"?(?P<eq_name>[\w\s\-\.]+)?\"?",
                contents[eq_start])
            eq_num, eq_name = -99, ""
            # NOTE(review): groupdict() does not raise KeyError for optional
            # groups; a failed match makes eq_m None (AttributeError) — this
            # handler's exception type looks wrong; confirm.
            try:
                eq_dict = eq_m.groupdict()
                eq_num, eq_name = int(eq_dict["eq_num"]), eq_dict["eq_name"]
            except KeyError, e:
                if eq_num == -99:
                    print "Uh Oh, we should always have an EQNSET number\n{0}".format(
                        e)
                    print "\n{0}".format(contents[eq_start])
                    raise
                else:
                    pass  # may not always have an equation set name
            if timing_key == "EQN":
                ref_dict[timing_key][eq_num] = {
                    "eq_name": eq_name,
                    "sub_sections": []
                }
                _getTimingEqnSet(ref_dict[timing_key][eq_num], contents,
                                 eq_start, eq_stop, timing_dict,
                                 full_pin_list, pin_group_dict)
            elif timing_key == "SPS":
                ref_dict[timing_key][specName][eq_num] = {"name": eq_name}
                _getTimingSpsSet(ref_dict[timing_key][specName][eq_num],
                                 contents[eq_start:eq_stop], timing_dict,
                                 ref_eqn_dict[eq_num]["SPECS"].keys())
def setup(app):
    """Configure the Flask/app object, resolve both worksheets' header column
    indices into module globals, and start the notification thread."""
    global ADDRESS_INDEX, CEP_INDEX, NUMBER_INDEX, NUMBER_FOR_CEP_INDEX, DISTRICT_INDEX, STATE_INDEX, CITY_INDEX, LAT_LNG_INDEX, ADDRESS_VAZAMENTOS_INDEX, DISTRICT_VAZAMENTOS_INDEX, STATE_VAZAMENTOS_INDEX, CITY_VAZAMENTOS_INDEX, LAT_LNG_VAZAMENTOS_INDEX, planilha_vazamentos, worksheet
    configure_app(app)
    # Main worksheet: locate each known header within row 1.
    worksheet = read_worksheet()
    main_header = worksheet.row_values(1)
    ADDRESS_INDEX = find_index(main_header, ADDRESS_HEADER)
    CEP_INDEX = find_index(main_header, CEP_HEADER)
    NUMBER_INDEX = find_index(main_header, NUMBER_HEADER)
    NUMBER_FOR_CEP_INDEX = find_index(main_header, NUMBER_FOR_CEP_HEADER)
    DISTRICT_INDEX = find_index(main_header, DISTRICT_HEADER)
    STATE_INDEX = find_index(main_header, STATE_HEADER)
    CITY_INDEX = find_index(main_header, CITY_HEADER)
    LAT_LNG_INDEX = find_index(main_header, LAT_LNG_HEADER)
    # Leaks ("vazamentos") worksheet: same header-resolution pass.
    planilha_vazamentos = ler_planilha()
    leak_header = planilha_vazamentos.row_values(1)
    ADDRESS_VAZAMENTOS_INDEX = find_index(leak_header, ADDRESS_VAZAMENTOS_HEADER)
    DISTRICT_VAZAMENTOS_INDEX = find_index(leak_header, DISTRICT_VAZAMENTOS_HEADER)
    STATE_VAZAMENTOS_INDEX = find_index(leak_header, STATE_VAZAMENTOS_HEADER)
    CITY_VAZAMENTOS_INDEX = find_index(leak_header, CITY_VAZAMENTOS_HEADER)
    LAT_LNG_VAZAMENTOS_INDEX = find_index(leak_header, LAT_LNG_VAZAMENTOS_HEADER)
    notifier = Thread(target=notification_thread)
    notifier.start()
def getTiming(timing_file, spec_timing_groups, full_pin_list, pin_group_dict, ref_eqn_dict):
    """ ref_eqn_dict is a running copy of the ref_dict["EQN"] dictionary, and is
    treated as "read-only", with the sole purpose as a reference for the SPEC
    parameters that get determined in the ref_dict["SPS"] dictionary.

    Splits a timing file into EQN (equation), SPS (spec) and WVT blocks.

    NOTE(review): Python 2 only (print statements, `except KeyError, e`).
    This duplicates a getTiming defined earlier in this file; ref_dict is
    built but never returned by the visible code — confirm intended output.
    """
    ref_dict = {"EQN":{}, "SPS":{}, "WVT":{}}
    f = util.FileUtils(timing_file, True)
    g = util.m_re(f.contents)
    g.grep(spec_timing_groups["topLevel"])
    # with timing, the eqnset is broken down into two sets: Equation(EQN) and Spec(SPS)
    start_pts,stop_pts = util.find_blocks(
        g.m_groups, spec_timing_groups["startClause"], spec_timing_groups["stopClause"]
    )
    for start, stop in zip(start_pts, stop_pts):
        # Lines of this top-level block (grep coordinates are 1-based).
        contents = g.allLines[int(float(g.coordinates[start])-1): int(float(g.coordinates[stop])-1)]
        if util.in_string(g.lines[start], "EQN"):
            timing_key = "EQN"
        elif util.in_string(g.lines[start], "WVT"):
            timing_key = "WVT"
            # NOTE(review): break abandons ALL remaining blocks, not just
            # this WVT one — WVT handling appears unimplemented.
            break
        else:
            timing_key = "SPS"
            b = util.m_re(contents)
            b.grep(spec_timing_groups["SPS"]["SPECIFICATION"])
            # NOTE(review): unguarded b.m_groups[0] raises IndexError when
            # no SPECIFICATION line matched.
            specName = re.sub("^\s*SPECIFICATION\s+\"|\"", "", b.m_groups[0]).strip()
            # NOTE(review): re-assignment means only the last SPECIFICATION
            # block survives in ref_dict["SPS"].
            ref_dict["SPS"] = {specName:{"GLOBALS":{}}}
        timing_dict = spec_timing_groups[timing_key]
        b = util.m_re(contents)
        # Strip patterns configured for this block type, then blank lines.
        for remove_expr in timing_dict["remove"]:
            b.sub(remove_expr,"")
        contents = util.remove_from_list(contents, "^\s*$")
        b = util.m_re(contents)
        b.grep(timing_dict["topLevel"])
        # 0-based start index of each EQNSET within this block.
        i_list = [int(float(a_num)-1) for a_num in b.coordinates]
        if len(i_list)>1:
            eqn_set_indices = [(a,b) for a,b in zip(i_list,i_list[1:]+[len(contents)])]
        else:
            eqn_set_indices = [(i_list[0], len(contents))]
        if timing_key == "SPS":
            # let's check for globals (lines between '{' and first EQNSET)
            _getTimingGlobalSpecVars(ref_dict[timing_key][specName]["GLOBALS"],
                                     contents[util.find_index(contents,"{")+1:i_list[0]])
        for (eq_start,eq_stop) in eqn_set_indices:
            # e.g.  EQNSET 1 "name"  — name is optional.
            eq_m = re.search("^\s*EQNSET\s+(?P<eq_num>\d+)\s*\"?(?P<eq_name>[\w\s\-\.]+)?\"?",
                             contents[eq_start])
            eq_num, eq_name = -99, ""
            # NOTE(review): groupdict() does not raise KeyError for optional
            # groups; a failed match makes eq_m None (AttributeError) — the
            # exception type here looks wrong; confirm.
            try:
                eq_dict = eq_m.groupdict()
                eq_num, eq_name = int(eq_dict["eq_num"]), eq_dict["eq_name"]
            except KeyError, e:
                if eq_num == -99:
                    print "Uh Oh, we should always have an EQNSET number\n{0}".format(e)
                    print "\n{0}".format(contents[eq_start])
                    raise
                else:
                    pass # may not always have an equation set name
            if timing_key == "EQN":
                ref_dict[timing_key][eq_num] = {"eq_name": eq_name, "sub_sections":[]}
                _getTimingEqnSet(
                    ref_dict[timing_key][eq_num], contents, eq_start, eq_stop,
                    timing_dict, full_pin_list, pin_group_dict
                )
            elif timing_key == "SPS":
                ref_dict[timing_key][specName][eq_num] = {"name":eq_name}
                _getTimingSpsSet(ref_dict[timing_key][specName][eq_num],
                                 contents[eq_start:eq_stop], timing_dict,
                                 ref_eqn_dict[eq_num]["SPECS"].keys())
import collections
from util import find_index
import glob
# NOTE(review): plt, Dataset (netCDF4) and wp are not imported/defined in
# the visible code — presumably set up earlier in the file; confirm.
plt.close('all')
# Grid-cell area and land-fraction files for the HadGEM2 model.
area_file=glob.glob("areacella_fx_HadGEM2*nc")[0]
# mask_file is not used in the visible code — presumably consumed later.
mask_file=glob.glob("sftlf_fx_HadGEM2*nc")[0]
#area_file='/Volumes/mac2/phil/pcmdi/CanESM2/esmControl/fx/atmos/areacella/r0i0p0/areacella_fx_CanESM2_esmControl_r0i0p0.nc'
#mask_file='/Volumes/mac2/phil/pcmdi/CanESM2/esmControl/fx/atmos/sftlf/r0i0p0/sftlf_fx_CanESM2_esmControl_r0i0p0.nc'
nc_area=Dataset(area_file)
lats=nc_area.variables['lat'][...]
lons=nc_area.variables['lon'][...]
# Index bounds of the wp lower-left/upper-right corners; +1 makes the
# upper bound inclusive when converted to a slice.
lat_bounds=find_index(lats,[wp.ll.lat,wp.ur.lat])
lat_bounds[1]+=1
lon_bounds=find_index(lons,[wp.ll.lon,wp.ur.lon])
lon_bounds[1]+=1
# NOTE(review): the two lines below overwrite the computed bounds with the
# FULL range (0, None), making the find_index results above dead code —
# looks like a debugging toggle; confirm which behavior is intended.
lat_bounds=(0,None)
lon_bounds=(0,None)
lat_bounds=slice(*lat_bounds)
lon_bounds=slice(*lon_bounds)
# Subset the cell areas and coordinate vectors to the selected window.
areas=nc_area.variables['areacella'][lat_bounds,lon_bounds]
lats=lats[lat_bounds]
lons=lons[lon_bounds]