# NOTE(review): mangled (newlines stripped) Python 2 snippet — left byte-identical.
# Prints a scan summary: p/r are parameter-name and result lists, d the domain,
# and `conclusion` is actually the vulnerable-parameter count passed by the
# caller (the "%s Parameter(s)" messages format that count).  The bare `re()`
# call relies on `re` being rebound at module level (appears to return to a
# menu/restart) — confirm before refactoring.
def complete(p,r,conclusion,d): print "\033[1;33m[!]\033[1;m Strike completed." if conclusion == 0: print "\033[1;31m[-]\033[1;m Given parameters are not vulnerable to XSS." re() elif conclusion ==1: print "\033[1;32m[+]\033[1;m %s Parameter is vulnerable to XSS." %conclusion else: print "\033[1;32m[+]\033[1;m %s Parameters are vulnerable to XSS."%conclusion
# NOTE(review): collapsed one-line catalog-filter constructor — left byte-identical.
# Builds self.predicates, a list of lambdas each testing one criterion of an item:
# Title/Creator/Description via `re(pattern)` used like re.compile (presumably
# aliased at module level — confirm), Subject via self.hasSubject, created/modified
# as range:min / range:max bounds, and Type membership (a bare string is wrapped in
# a list).  The MARKER class attribute is a sentinel distinguishing "not supplied"
# from falsy values; note `if Type:` does not compare against MARKER like the
# others — verify that asymmetry is intended.
def __init__( self , Title=MARKER , Creator=MARKER , Subject=MARKER , Description=MARKER , created=MARKER , created_usage='range:min' , modified=MARKER , modified_usage='range:min' , Type=MARKER , **Ignored ): self.predicates = [] if Title is not self.MARKER: self.filterTitle = Title self.predicates.append( lambda x, pat=re( Title ): pat.search( x.Title() ) ) if Creator is not self.MARKER: self.predicates.append( lambda x, pat=re( Creator ): pat.search( x.Creator() ) ) if Subject and Subject is not self.MARKER: self.filterSubject = Subject self.predicates.append( self.hasSubject ) if Description is not self.MARKER: self.predicates.append( lambda x, pat=re( Description ): pat.search( x.Description() ) ) if created is not self.MARKER: if created_usage == 'range:min': self.predicates.append( lambda x, cd=created: cd <= x.created() ) if created_usage == 'range:max': self.predicates.append( lambda x, cd=created: cd >= x.created() ) if modified is not self.MARKER: if modified_usage == 'range:min': self.predicates.append( lambda x, md=modified: md <= x.modified() ) if modified_usage == 'range:max': self.predicates.append( lambda x, md=modified: md >= x.modified() ) if Type: if type( Type ) == type( '' ): Type = [ Type ] self.filterType = Type self.predicates.append( lambda x, Type=Type: x.Type() in Type )
# NOTE(review): collapsed snippet — left byte-identical.  Counts occurrences of
# each lowercase ASCII letter in `text`.  `re(r'\W', "", text)` is called with
# (pattern, replacement, string), i.e. like re.sub — `re` must be rebound
# elsewhere in this file; confirm.  Any surviving character outside a-z
# (uppercase letters, digits, underscore) raises KeyError — consider lowercasing
# or filtering first.  ("occurences" is a typo for "occurrences"; rename only if
# all callers are updated.)
def occurences(text): alpha_dict = {} all_letters = "abcdefghijklmnopqrstuvwxyz" for l in all_letters: alpha_dict[l] = 0 for l in re(r'\W', "", text): alpha_dict[l] += 1 return alpha_dict
# NOTE(review): collapsed method — left byte-identical.  Builds a Google Static
# Maps <img> tag centered on self.latitude/self.longitude.  `re(" ","%20")` is
# called like str.replace — `re` is presumably a bound method of some location
# string attribute (TODO confirm); the bare `except:` silently drops the
# center= query parameter when that call fails.  Returns "" when either
# coordinate is empty.
def print_loc_api(self): try: center="center="+re(" ","%20")+"&" except: center = "" if self.longitude == "" or self.latitude == "": return "" else: return "<img src='https://maps.googleapis.com/maps/api/staticmap?"+center+"zoom=13&size=600x300&maptype=roadmap&markers=color:red%7Clabel:G%7C"+self.latitude+","+self.longitude+"&visual_refresh=true' class='img-responsive'>"+'<img src="http://maps.googleapis.com/maps/api/staticmap?center=Albany,+NY&zoom=13&scale=false&size=600x300&maptype=roadmap&format=png&visual_refresh=true&markers=size:mid%7Ccolor:0xff0000%7Clabel:1%7CAlbany,+NY&markers=size:mid%7Ccolor:0xff0000%7Clabel:1%7CAlbany,+NY" alt="Google Map of Albany, NY" hidden>'
def assert_grep(text, expected_regexp, msg=None):
    """Fail the test unless the text matches the regular expression."""
    # At module level `re` is rebound to re.compile and `re.type` is the
    # compiled-pattern type, so both strings and precompiled patterns work.
    pattern = (
        expected_regexp
        if isinstance(expected_regexp, re.type)
        else re(expected_regexp)
    )
    if pattern.search(text) is None:
        prefix = "Regexp didn't match" if msg is None else msg
        raise AssertionError('%s: %r not found in %r' % (prefix, pattern.pattern, text))
# NOTE(review): collapsed pandas method — left byte-identical.  Shadows the name
# `re` with a local lambda that builds a column-rename mapping ({'MW': anc_type})
# — confusing but harmless inside this scope.  Merges each ANC_TYPE group's 'MW'
# column into one wide frame, then converts rows to {utc datetime: {type: $/MW}}.
# The variable `i` is reused (first the column list, then the row index) — works,
# but worth renaming.  pandas.set_option mutates global display state as a side
# effect — review whether that belongs here.
def get_ancillary_services(self, node_id, **kwargs): """ Returns dict of dicts Top-level key is period start time as datetime.datetime object, tzinfo=pytz.utc Top-level value is a dictionary with keys ['RU', 'RD', 'NR', 'SR', 'RMU', 'RMD'] Second-level value is $/MW float If no data, returns an empty dict """ df = self.get_AS_dataframe(node_id, **kwargs) if df.empty: return {} # parse grouped = df.groupby('ANC_TYPE') re = lambda s: {'MW': s} i = ['MW'] a = pandas.DataFrame # merge dataframes firsttime = True for name, group in grouped: if firsttime: a = group[i].rename(columns=re(name)) firsttime = False else: a = pandas.merge(a, group[i].rename(columns=re(name)), left_index=True, right_index=True, how='outer') pandas.set_option('display.width', 200) lmp_dict = {} # loop through start_times for i, row in a.iterrows(): lmp_dict[i.to_pydatetime()] = {} for column in row.keys(): lmp_dict[i.to_pydatetime()][column] = row[column] return lmp_dict
def ToSizeString(byte: int) -> str:
    '''
    Convert a byte count to a human-readable size string.

    Parameters
    ----------
    byte : int
        Size in bytes.

    Returns
    -------
    str
        Formatted size, e.g. '6.90 GB'.  Values of 1024 TB and above are
        reported in TB (the largest supported unit).
    '''
    units = ('b', 'KB', 'MB', 'GB', 'TB')
    size = float(byte)
    # Divide down through all units except the last; whatever remains is TB.
    for unit in units[:-1]:
        if size < 1024:
            return '{:.2f} {}'.format(size, unit)
        size /= 1024
    # Bug fix: the original performed one extra /1024 before the final
    # return, so PB-scale inputs were reported 1024x too small (labeled TB).
    # Also: no longer shadows the name `re` with a local lambda.
    return '{:.2f} {}'.format(size, units[-1])
# NOTE(review): collapsed config-migration method — left byte-identical.  Writes
# each new default into the config when the previously stored value is missing or
# differs.  `re(new_value)` is called as a conversion on the new value before
# comparing with the stored (string) value — `re` must be rebound elsewhere
# (likely a to-text/repr helper, not the regex module); confirm, since the
# "did it change?" comparison depends entirely on it.
def _update_defaults(self, defaults, old_version, verbose=False): """Update defaults after a change in version""" old_defaults = self._load_old_defaults(old_version) for section, options in defaults.items(): for option in options: new_value = options[option] try: old_value = old_defaults.get(section, option) except (NoSectionError, NoOptionError): old_value = None if old_value is None or re(new_value) != old_value: self._set(section, option, new_value, verbose)
# NOTE(review): collapsed Flask upload view — left byte-identical.  Saves each
# uploaded file under APP_ROOT/images and renders complete.html with the LAST
# file's name.  Review items: the else-branch message ("Couldn't create upload
# directory") actually prints when the directory already EXISTS — inverted and
# misleading; the bare `re()` call before the return depends on a module-level
# rebinding of `re` (purpose unclear — verify); filenames are used unsanitized
# (consider werkzeug.utils.secure_filename).
def upload(): target = os.path.join(APP_ROOT, 'images/') print(target) if not os.path.isdir(target): os.mkdir(target) else: print("Couldn't create upload directory: {}".format(target)) print(request.files.getlist("file")) for upload in request.files.getlist("file"): print(upload) print("{} is the file name".format(upload.filename)) filename = upload.filename destination = "/".join([target, filename]) print ("Accept incoming file:", filename) print ("Save it to:", destination) upload.save(destination) re() # return send_from_directory("images", filename, as_attachment=True) return render_template("complete.html", image_name=filename)
# NOTE(review): collapsed test-helper method — left byte-identical.  Runs
# `commandline` with glibc malloc debugging enabled (MALLOC_CHECK_/PERTURB_) and
# strips djvuextract progress lines from stderr using the module-level
# `re` = re.compile alias before wrapping everything in an ipc_result.
# Popen is used without text mode, so stdout/stderr are bytes while the
# substitution pattern is str — presumably this file targets Python 2; confirm
# before porting (on Python 3 the .sub() call would raise TypeError).
def run(self, *commandline): env = dict( os.environ, MALLOC_CHECK_='3', MALLOC_PERTURB_=str(0xA5), ) child = ipc.Popen( list(commandline), stdout=ipc.PIPE, stderr=ipc.PIPE, env=env, ) stdout, stderr = child.communicate() stderr = re('^(?: \S+ --> \S+ \(\d+ bytes\)\n)+$').sub( '', stderr) # strip djvuextract cruft return ipc_result(stdout, stderr, child.returncode)
# NOTE(review): collapsed snippet — left byte-identical.  Converts a roman-numeral
# string to int after validating it with the injected pattern factory `re`
# (default sonnetRe — a callable returning a compiled pattern); returns 0 when
# the string does not match.  Caution: the cached global numeral_map is built
# with zip(), which on Python 3 is a one-shot iterator — a second call would see
# it exhausted and always return 0; wrap it in list()/tuple() when porting.
def roman_to_int(n,re=sonnetRe): global numeral_map if numeral_map is None: numeral_map = zip( (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1), ('M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I') ) n = n.strip() if re().match(n) is None: #print n,": not a match" return 0 i = result = 0 for integer, numeral in numeral_map: while n[i:i + len(numeral)] == numeral: result += integer i += len(numeral) return result
# NOTE(review): collapsed function — left byte-identical.  Effective behavior:
# groups the input specs by 'language' and returns [{'language': l, 'specs': ...}]
# immediately.  EVERYTHING after that first `return` is dead code; the dead
# section also references undefined names (uniq_defs, def_to_examples_map vs the
# defs_to_exmpls_map actually populated), uses bare `next` as a no-op statement,
# and calls `re(d, d_to_match)` on an unknown rebinding of `re`.  Candidate for
# deletion once confirmed against history.  Also note the merge branch calls
# list.append(*specs), appending only one element per spec list.
def compose_specs(all_specs): specs_to_lang_map = {} new_specs = [] for s in all_specs: language = s.get('language'); if specs_to_lang_map.get(language): specs_to_lang_map[language].append(*s.get('specs')) else: specs_to_lang_map[language] = s.get('specs') return [ {'language': l, 'specs': specs_to_lang_map.get(l) } for l in specs_to_lang_map ] for l in specs_to_lang_map: all_defs_for_lang = [] uniq_defs_for_lang = [] defs_to_exmpls_map = {} specs = specs_to_lang_map.get(l) if len(specs) == 1: new_specs.append({ 'language': l, 'specs': specs[0] }) else: for s in specs: for d in s.get('definitions'): definition = d.get('definition') all_defs_for_lang.append(definition) defs_to_exmpls_map[definition] = d.get('examples') for d in all_defs_for_lang: match = '' matched_d = '' for d_to_match in all_defs_for_lang: print(d_to_match) match = re(d, d_to_match) if match: matched_d = d_to_match; next if match: examples = def_to_examples_map[d]; defs_to_examples_map[matched_d].append(examples); matched_d = match = '' next else: uniq_defs.append(d) return new_specs
def json_bson_object_id(obj):
    """Recursively coerce JSON-ish string values in *obj* to richer types.

    Rules (applied to each value of the dict, in place):
      - 24-char hex strings (spaces allowed by the pattern) -> ObjectId
      - 'D:<digits>' strings -> current timestamp via time.time()
      - strings starting with '/' -> re(rest) — `re` is called as a
        factory here (presumably a re.compile alias; confirm in context)
    Lists are walked element-wise and nested dicts recurse.

    Mutates and returns *obj*.
    """
    for name in obj:
        value = obj[name]
        if isinstance(value, str):
            if re.match(r'^[0-9 a-f]{24}$', value) is not None:
                obj[name] = ObjectId(value)
            elif re.match(r'^D:([0-9]*)$', value) is not None:
                obj[name] = time.time()
            elif value.startswith('/'):
                # Bug fix: was re(value.substring(1)) — substring() is not a
                # Python str method (JS-ism); slice off the leading '/'.
                obj[name] = re(value[1:])
        elif isinstance(value, list):
            for index, item in enumerate(value):
                if isinstance(item, dict):
                    value[index] = json_bson_object_id(item)
                elif re.match(r'^[0-9 a-f]{24}$', item):
                    # Bug fix: was ObjectId(value), which wrapped the whole
                    # list instead of the matching element.
                    value[index] = ObjectId(item)
        elif isinstance(value, dict):
            json_bson_object_id(value)
    return obj
# NOTE(review): collapsed sympy args-consistency test — left byte-identical.
# Checks that re(x) (sympy's real-part function, not the regex module) satisfies
# the suite-wide _test_args contract; `x` is a symbol from the test module scope.
def test_sympy__functions__elementary__complexes__re(): from sympy.functions.elementary.complexes import re assert _test_args(re(x))
# NOTE(review): test-suite module header — left byte-identical.  It deliberately
# rebinds the module name: `re.compile.M = re.M` stashes the MULTILINE flag on the
# compile function, `re = re.compile` makes re('pat') a shorthand for compiling,
# and `re.type` records the compiled-pattern type for isinstance checks.  This
# shadowing is why the rest of the file writes re(...) — rename with care.  Also
# pins nose's assert_multi_line_equal maxDiff to None when available.
# Copyright © 2009-2015 Jakub Wilk # # This package is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 2 dated June, 1991. import inspect import os import re import subprocess as ipc import xml.etree.cElementTree as etree re.compile.M = re.M re = re.compile re.type = type(re('')) from nose import SkipTest from nose.tools import ( assert_true, assert_equal, assert_not_equal, ) try: from nose.tools import assert_multi_line_equal except ImportError: assert_multi_line_equal = assert_equal else: assert_multi_line_equal.im_class.maxDiff = None
# NOTE(review): collapsed Python 2 XSS-scanner routine spanning several mangled
# lines — left byte-identical; too entangled (globals URL/WAF/br/evades/vectors,
# exact control flow across the mangling) to restyle safely.  Parses the URL's
# query string and injects each payload (WAF-evasion list with a 10s delay, or
# the plain vector list), flagging parameters whose payload is reflected in the
# response, then summarizes via complete().  Review items: multiple bare
# `except:` blocks swallow errors; several "Exiting..." strings are bare
# expressions and never printed; `re()` in the offline handler relies on a
# module-level rebinding; the KeyboardInterrupt handler prints but does not exit.
def GET(): try: try: if WAF == "True": finalurl = urlparse(URL) urldata = parse_qsl(finalurl.query) domain0 = u'{uri.scheme}://{uri.netloc}/'.format(uri=finalurl) domain = domain0.replace("https://","").replace("http://","").replace("www.","").replace("/","") paraname = [] paravalue = [] lop = unicode(len(evades)) print "\033[1;97m[>]\033[1;m Payloads loaded: "+lop print "\033[1;97m[>]\033[1;m Striking the paramter(s)" parameters = parse_qs(finalurl.query,keep_blank_values=True) path = finalurl.scheme+"://"+finalurl.netloc+finalurl.path for para in parameters: #Arranging parameters and values. for i in parameters[para]: paraname.append(para) paravalue.append(i) total = 0 conclusion = 0 fpar = [] fresult = [] progress = 0 for param_name, pv in izip(paraname,paravalue): #Scanning the parameter. print "\033[1;97m[>]\033[1;m Testing parameter: "+ param_name fpar.append(unicode(param_name)) for x in evades: # validate = x if validate == "": progress = progress + 1 else: sys.stdout.write("\r\033[1;97m[>]\033[1;m Payloads injected: %i / %s"% (progress,len(evades))) sys.stdout.flush() progress = progress + 1 enc = quote_plus(x) data = path+"?"+param_name+"="+pv+enc time.sleep(10) try: page = br.open(data) sourcecode = page.read() except (Exception): sourcecode = "lol" try: if x in sourcecode: print "\n\033[1;32m[+]\033[1;m XSS Vulnerability Found! \n\033[1;32m[+]\033[1;m Parameter:\t%s\n\033[1;32m[+]\033[1;m Payload:\t%s" %(param_name,x) fresult.append(" Vulnerable ") conclusion = 1 total = total+1 progress = progress + 1 scan_j = raw_input("\033[1;34m[?]\033[1;m Keep the scan running? [y/N] ").lower() if scan_j == "y": pass else: "\033[1;33m[!]\033[1;m Exiting..." sys.exit() else: conclusion = 0 except: "\033[1;33m[!]\033[1;m Exiting..." 
sys.exit() if conclusion == 0: print "\n\033[1;31m[-]\033[1;m '%s' parameter not vulnerable."%param_name fresult.append("Not Vulnerable") progress = progress + 1 pass progress = 0 complete(fpar,fresult,total,domain) else: finalurl = urlparse(URL) urldata = parse_qsl(finalurl.query) domain0 = u'{uri.scheme}://{uri.netloc}/'.format(uri=finalurl) domain = domain0.replace("https://","").replace("http://","").replace("www.","").replace("/","") paraname = [] paravalue = [] lop = unicode(len(vectors)) print "\033[1;97m[>]\033[1;m Payloads loaded: "+lop print "\033[1;97m[>]\033[1;m Striking the paramter(s)" parameters = parse_qs(finalurl.query,keep_blank_values=True) path = finalurl.scheme+"://"+finalurl.netloc+finalurl.path for para in parameters: #Arranging parameters and values. for i in parameters[para]: paraname.append(para) paravalue.append(i) total = 0 conclusion = 0 fpar = [] fresult = [] progress = 0 for param_name, pv in izip(paraname,paravalue): #Scanning the parameter. print "\033[1;97m[>]\033[1;m Testing parameter: "+ param_name fpar.append(unicode(param_name)) for x in vectors: # validate = x if validate == "": progress = progress + 1 else: sys.stdout.write("\r\033[1;97m[>]\033[1;m Payloads injected: %i / %s"% (progress,len(vectors))) sys.stdout.flush() progress = progress + 1 enc = quote_plus(x) data = path+"?"+param_name+"="+pv+enc try: page = br.open(data) except: print Style.BRIGHT + Fore.RED + "\n[-] Target responded with HTTP 404 Error. Consider exiting.." sourcecode = page.read() if x in sourcecode: print "\n\033[1;32m[+]\033[1;m XSS Vulnerability Found! \n\033[1;32m[+]\033[1;m Parameter:\t%s\n\033[1;32m[+]\033[1;m Payload:\t%s" %(param_name,x) webbrowser.open(URL + x) fresult.append(" Vulnerable ") conclusion = 1 total = total+1 progress = progress + 1 scan_i = raw_input("\033[1;34m[?]\033[1;m Keep the scan running? [y/N] ").lower() if scan_i == "y": pass else: print "\033[1;33m[!]\033[1;m Exiting..." 
sys.exit() else: conclusion = 0 if conclusion == 0: print "\n\033[1;31m[-]\033[1;m '%s' parameter not vulnerable."%param_name fresult.append("Not Vulnerable") progress = progress + 1 pass progress = 0 complete(fpar,fresult,total,domain) except(httplib.HTTPResponse, socket.error), Exit: print "\033[1;31m[-]\033[1;m URL "+domain+" is offline!" re() except(KeyboardInterrupt), Exit: print "\n\033[1;33m[!]\033[1;m Exiting..."
#!/usr/bin/python3 # -*- coding: utf-8 -*- 'a test module' __author__ = 'viphsj' import requests import re ''' 用到的库: requests re(正则) 流程: 手动登录查看需要的参数,观察一次登录发送的请求 ***** *** * 发现除了一个token是动态的,其他的都是输入或是固定的。参数如下: 'login':'******', 'password':'******', 'authenticity_token':token, 'commit':'Sign in', 'utf8': "✓"} ***** *** * 用正则在登录页面爬取登录用的token,并赋值到参数里 ***** *** *
# NOTE(review): collapsed calibre cover-generation defaults block — left
# byte-identical (the trailing get_use_roman definition is truncated in this
# view).  The re()/list_re() calls inside footer_template are calibre
# template-language functions evaluated at cover render time, NOT Python regex
# calls.  Near-duplicate of the later copy of this block — consider
# consolidating.
# Default settings {{{ cprefs = JSONConfig('cover_generation') cprefs.defaults['title_font_size'] = 120 # px cprefs.defaults['subtitle_font_size'] = 80 # px cprefs.defaults['footer_font_size'] = 80 # px cprefs.defaults['cover_width'] = 1200 # px cprefs.defaults['cover_height'] = 1600 # px cprefs.defaults['title_font_family'] = None cprefs.defaults['subtitle_font_family'] = None cprefs.defaults['footer_font_family'] = None cprefs.defaults['color_themes'] = {} cprefs.defaults['disabled_color_themes'] = [] cprefs.defaults['disabled_styles'] = [] cprefs.defaults['title_template'] = '<b>{title}' cprefs.defaults['subtitle_template'] = '''{series:'test($, strcat("<i>", $, "</i> - ", raw_field("formatted_series_index")), "")'}''' cprefs.defaults['footer_template'] = r'''program: # Show at most two authors, on separate lines. authors = field('authors'); num = count(authors, ' & '); authors = sublist(authors, 0, 2, ' & '); authors = list_re(authors, ' & ', '(.+)', '<b>\1'); authors = re(authors, ' & ', '<br>'); re(authors, '&&', '&') ''' Prefs = namedtuple('Prefs', ' '.join(sorted(cprefs.defaults))) _use_roman = None def get_use_roman(): global _use_roman
# NOTE(review): collapsed test-helper method — left byte-identical.  Probes
# `pdf2djvu --version` (which exits with rc=1 and prints its banner to stderr —
# matched via the module-level `re` = re.compile alias) and raises nose's
# SkipTest when the requested feature string is absent from the version output.
def require_feature(self, feature): r = self.pdf2djvu('--version') r.assert_(stderr=re('^pdf2djvu '), rc=1) if feature not in r.stderr: raise SkipTest(feature + ' support missing')
# NOTE(review): module imports, two compiled ASS-subtitle regexes, and the start
# of class mkvstuff (truncated in this view) — left byte-identical.  The variable
# annotations `ASS_STYLE_RE: re()` are misleading: with
# `from __future__ import annotations` they are never evaluated (so they do not
# crash), but the correct annotation would be re.Pattern.  style_to_dict returns
# the Style line's named groups as a dict, or None when the line does not match.
from __future__ import annotations import datetime import json import os import random import re from typing import AnyStr, Dict, List, Optional, Tuple, Union from lxml import objectify from lib.common import common from lib.pathlibex import Path ASS_STYLE_RE: re() = re.compile(r'Style: (?P<name>[^,]+),(?P<font_family>[^,]+),(?P<font_size>[^,]+),(?P<color_primary>[^,]+),(?P<color_secondary>[^,]+),(?P<color_outline>[^,]+),(?P<color_back>[^,]+),(?P<is_bold>[^,]+),(?P<is_italic>[^,]+),(?P<is_underline>[^,]+),(?P<is_strikeout>[^,]+),(?P<scale_x>[^,]+),(?P<scale_y>[^,]+),(?P<spacing>[^,]+),(?P<angle>[^,]+),(?P<border_style>[^,]+),(?P<outline>[^,]+),(?P<shadow>[^,]+),(?P<alignment>[^,]+),(?P<margin_l>[^,]+),(?P<margin_r>[^,]+),(?P<margin_v>[^,]+),(?P<encoding>[^,]+)') DIALOGUE_RE: re() = re.compile(r'^(?P<part1>(?:(Comment|Dialogue): )(?:[-+?0-9\.]+),(?P<start>[-+?0-9\.\:]+),(?P<end>[-+?0-9\.\:]+),)(?P<style_name>[^,]*)(?P<part2>,(?:[^,]+)?,(?:[^,]+)?,(?:[^,]+)?,(?:[^,]+)?,(?:[^,]+)?,(?P<subtext>.+)?)$') class mkvstuff: @staticmethod def style_to_dict(dl): dl = dl.strip() m = ASS_STYLE_RE.match(dl) if m: return m.groupdict() else: return None #: @staticmethod def dict_to_style(style_dict):
def relu(x):
    """Element-wise rectified linear unit: max(0, y) for every y in x."""
    # np.vectorize applies the scalar clamp across any array shape; note it
    # infers the output dtype from the first element's result.
    clamp = np.vectorize(lambda value: max(0, value))
    return clamp(x)
# NOTE(review): collapsed notebook-export residue (note the "# In[..]:" cell
# markers) — left byte-identical.  Reads a Code-Jam-style input file and, per
# case, appends re(s, target, k, keys) — here `re` is clearly a renamed scoring
# helper, not the regex module (TODO confirm its definition) — or 0 when
# is_possible() fails.  The bare `L` and max_banana(s, target) expressions are
# notebook cell leftovers with no effect in a plain script, and the file handle
# is never closed (use `with open(...)`).
#reading file f_name = 'B-small42.in' f = open(f_name, 'r') N = int(f.readline()) L = [] for _ in range(N): print(_) #c = int(f.readline()) tmp = f.readline() k, l, s = map(int, tmp.split(" ")) keys = f.readline()[:-1] #map(f.readline().split()) target = f.readline()[:-1] if is_possible(keys, target): L.append(re(s, target, k, keys)) else: L.append(0) #print('nposiible') # In[192]: L # In[177]: max_banana(s, target) # In[17]:
# NOTE(review): collapsed search-ranking solution — left byte-identical.
# `re(opening, closing, page)` is used like a substring-extraction helper
# returning tag bodies between the two delimiters — a renamed helper, not the
# regex module; verify its contract before touching this.  For each page it
# scores: basic points = case-insensitive whole-word occurrences of `word`
# (non-letters are blanked out first), plus link points = basic/link-count of
# every page linking to it; returns the index of the highest-scoring page.
# Both url and link slices keep their surrounding quotes ([startidx:endidx+1]),
# so the equality comparison stays consistent.
def solution(word, pages): word = word.lower() page_list = [] ''' page = { 'url' : None 'idx' : 0, 'basic' : 0, 'link' : 0, 'pointers' : [이 html을 향하는 html들의 idx] } ''' for page in pages: page_dict = {'url': None, 'basic': 0, 'link': 0, 'pointers': []} meta_tags = re('<meta', '/>', page) # print(meta_tags) link_tags = re('<a', '>', page) # print(link_tags) #find url url_content = meta_tags[0].split(' ')[2] startidx, endidx = -1, -1 for idx, c in enumerate(url_content): if c == '"' and startidx == -1: startidx = idx if c == '"' and startidx != -1 and idx != startidx and endidx == -1: endidx = idx url = url_content[startidx:endidx + 1] page_dict['url'] = url # print(url) #find link for link in link_tags: startidx, endidx = -1, -1 for idx, c in enumerate(link): if c == '"' and startidx == -1: startidx = idx if c == '"' and startidx != -1 and idx != startidx and endidx == -1: endidx = idx link = link[startidx:endidx + 1] # print(startidx, endidx) # print(link) page_dict['pointers'].append(link) page_dict['link'] += 1 #basic points temp_html = '' for c in page: if not (c.islower() or c.isupper()): temp_html += ' ' else: temp_html += c temp_html = temp_html.split(' ') for part in temp_html: if part.lower() == word: page_dict['basic'] += 1 page_list.append(page_dict) #calculate points = [0 for i in range(len(page_list))] for idx, page in enumerate(page_list): points[idx] += page['basic'] for link in page['pointers']: for target_idx, target in enumerate(page_list): if target['url'] == link: points[target_idx] += page['basic'] / page['link'] # print(points) answer = points.index(max(points)) return answer
def f1_fun(y_true, y_pred):
    """F1 score: harmonic mean of the surrounding `pre` (precision) and
    `re` (recall) metric functions, with K.epsilon() guarding the divide."""
    p = pre(y_true, y_pred)
    r = re(y_true, y_pred)
    return 2 * ((p * r) / (p + r + K.epsilon()))
import re

# Scan flag.txt and print every line containing a flag of the expected shape.
# Bug fix: the original called re(pattern, line) — the re module itself is not
# callable; re.search is what was intended.  Also close the file deterministically.
with open('flag.txt', 'r') as flag_file:
    for line in flag_file:
        if re.search(r"nactf{[a-zA-Z0-9_]{48}[aoeui]}", line):
            print(line)
# NOTE(review): second copy of the calibre cover-generation defaults block — left
# byte-identical; near-duplicate of the earlier one, consider consolidating.
# The re()/list_re() calls inside footer_template are calibre template-language
# functions evaluated at render time, not Python regex calls.
cprefs = JSONConfig('cover_generation') cprefs.defaults['title_font_size'] = 120 # px cprefs.defaults['subtitle_font_size'] = 80 # px cprefs.defaults['footer_font_size'] = 80 # px cprefs.defaults['cover_width'] = 1200 # px cprefs.defaults['cover_height'] = 1600 # px cprefs.defaults['title_font_family'] = None cprefs.defaults['subtitle_font_family'] = None cprefs.defaults['footer_font_family'] = None cprefs.defaults['color_themes'] = {} cprefs.defaults['disabled_color_themes'] = [] cprefs.defaults['disabled_styles'] = [] cprefs.defaults['title_template'] = '<b>{title}' cprefs.defaults[ 'subtitle_template'] = '''{series:'test($, strcat("<i>", $, "</i> - ", raw_field("formatted_series_index")), "")'}''' cprefs.defaults['footer_template'] = r'''program: # Show at most two authors, on separate lines. authors = field('authors'); num = count(authors, ' & '); authors = sublist(authors, 0, 2, ' & '); authors = list_re(authors, ' & ', '(.+)', '<b>\1'); authors = re(authors, ' & ', '<br>'); re(authors, '&&', '&') ''' Prefs = namedtuple('Prefs', ' '.join(sorted(cprefs.defaults))) # }}} # Draw text {{{ Point = namedtuple('Point', 'x y')
# NOTE(review): collapsed pytest module header plus server fixture — left
# byte-identical.  The fixture spawns src/server.py and yields the base URL
# scraped from the first stderr line via a compiled " * Running on ..." pattern.
# Review items: if the banner line does not match, the fixture yields nothing
# (pytest errors with "did not yield") and the spawned server is left running —
# there is no terminate/kill teardown visible in this chunk.
""" Contains common "user story" tests Will test error conditions that a user can reasonably do as in a user cannot pass in an incorrect token re(regex): Gives access to regex for valid_email pytest(pytest module): Gives access to pytest command """ import story_functions as sf import re from subprocess import Popen, PIPE import signal from time import sleep import pytest # Fixture which gets the URL of the server and starts it @pytest.fixture def url(): """ Allows pytest to create a new server. Returns url for new server. """ url_re = re.compile(r" \* Running on ([^ ]*)") server = Popen(["python3", "src/server.py"], stderr = PIPE, stdout = PIPE) line = server.stderr.readline() local_url = url_re.match(line.decode()) if local_url: yield local_url.group(1)
# NOTE(review): collapsed pytest-bdd step definitions — left byte-identical.
# Here `re` is pytest_bdd.parsers.re (a step-name parser factory), not the
# stdlib regex module.  Likely bug: both @scenario functions are named
# test_arguments, so the second definition shadows the first and only one
# scenario is collected — rename one.  The final step function is truncated in
# this view.
from pytest_bdd import scenario, given, when, then, parsers from pytest_bdd.parsers import re import pytest @scenario('test.feature', 'Arguments for given, when, thens') def test_arguments(): pass @scenario('test.feature', 'Arguments for given, when, thens negative') def test_arguments(): pass @given(re('there are (?P<number>.*) cucumbers')) def start_cucumbers(context, number): print("there are 5 cucumbers") context.total = number # return dict(start = start, eat =0) @when(re('I eat (?P<number>.*) cucumbers')) def eat_cucumbers(context, number): print("I eat 3 cucumbers") context.eat = number # start_cucumbers['eat'] +=eat @then(re('I should have (?P<number>.*) cucumber')) def should_have_left_cucumbers(context, number):
# NOTE(review): collapsed Python 2 practice script — left byte-identical.
# Exercises dict methods, cmp/int/bool builtins and string iteration.  Review
# items: PI is not defined in this chunk; the trailing re("( *)[+-]?...", ) call
# passes only the pattern (the dangling comma is harmless syntactically), but
# calling the re module itself raises TypeError — presumably re.compile or
# re.match was intended.
print PI d = {'Michael': 95, 'Bob': 75, 'Tracy': 85} print 'Michael' in d print d.get('Micael') print d.get('Micael', -1) print d.pop('Michael') print d print cmp(1, 2) #-1 print int(12.34) #12 print bool(1) #True print bool(-1) #False print bool(2) #True a = abs # print a(-1) a = "string" bs = [] for s in a: bs.append(s) print bs re("( *)[+-]?(\\d+|\\d+\\.\\d*|\\d*\\.\\d+)([eE][+-]?[0-9]+)?", )
def is_image_or_layer_id(s):
    """
    Return True if the string `s` looks like a layer ID e.g. a SHA256-like id.
    """
    # `re` is used callable-style here (an re.compile alias elsewhere in this
    # project — TODO confirm that re.IGNORECASE is still reachable through it).
    # NOTE: IGNORECASE means uppercase hex (A-F) is accepted too.
    # Bug fix: the original returned the raw match object (or None); coerce to
    # an actual bool so the function honors its documented True/False contract.
    return re(r'^[a-f0-9]{64}$', re.IGNORECASE).match(s) is not None
# NOTE(review): collapsed, partially-translated Perl routine (it continues into a
# block of still-commented Perl) — left byte-identical; too unfinished to restyle
# safely.  Intended behavior: scan the open <CFG> log for child config records
# matching Date/Location/HostID/Parent_ID (optionally filtered by SN masks) and
# return the matching serial numbers.  Known issues to resolve when porting:
# `CFG.readline().rstrip` is never called (missing parens); bare `split(";", ...)`
# is a Perl-ism (should be str.split); `next` is a no-op expression statement;
# `Cfg_Recs.appen` is a typo for append; the first PrintLog call's % formatting is
# malformed; and gBuf is assigned at the end without a `global` declaration, which
# makes every earlier gBuf read raise UnboundLocalError.
def Get_Cfg_Recs(Date, Location, HostID, Parent_ID, SN_Masks): "# Called by someone with an Event rec data looking for child cfg \ records. It requires handle <CFG> to already be open for read" Cfg_Recs = [] Rec = [] Buf = '' PrintLog( "Get_Cfg_Recs: Date=%s, Location=%s, HostID=%s, Parent_ID=%s" % Date, Location, HostID, Parent_ID % 1) Last_Rec = [] Last_Rec = gBuf[0].split(r";") if Date < Last_Rec[1]: return # The date we want is earlier than the last record read, # so don't bother with any more Done = 0 while (not Done): # <CFG> # perl had <CFG> barewood file handel that is open ###################### Stop ########################### CFG_line = CFG.readline().rstrip # Read and chomp return CFG_Line_num = CFG.tell() # get the file line postion PrintLog("Read CfgLog line: " + CFG_Line_num, 1) if CFG_line == None: Done = 1 Buf = CFG_line # Save it in case we overrun the date (-> gBuf[0]) Rec = split(";", CFG_line) # Split the records on ; if Rec[0] != 2 and not Done: Exit( 999, "Unrecognized file format for CFG record line: " + CFG_Line_num) elif Rec[1] < Date: # Cfg rec is too early next # Skip elif Rec[1] > Date: # Too late, we'll stop with this record Done = 1 # still loaded in gBuf[0] else: gBuf.append(CFG_Line) PrintLog(CFG_Line, 1) #0: $Type, 1: $Date, 2: $Location, $HostID, $OpID, $Parent_ID, $Slot, $PN, $SN) Msg = '' for index in gBuf: Rec = split(";", index) SNum = Rec[8] if SN_Masks == 0: SN_Match = SN_Masks # Assume a match if no mask list provided if Debug: PrintLog("SN_Match[0]=" + SN_Match, 1) for index2 in SN_Masks: if re(r"^" + index2, SNum): SN_Match = 1 if Debug: PrintLog("SN_Match[1]=" + SN_Match, 1) # #!!! Still in debug ... 
# foreach ( @Exclude ) { # $SN_Match = 0 if $SNum =~ /^$_/; # } # &PrintLog("SN_Match[2]=$SN_Match",1) if $Debug; if ((Rec[2] == Location) and (Rec[3] == HostID) and (Rec[5] == Parent_ID)): # and # ($SN_Match)){ Msg += ', ' + SNum Cfg_Recs.appen(SNum) #, $Date, $Location, $HostID, $OpID, $Parent_ID, $Slot, $PN, $SN) PrintLog("Returned: " + Msg, 1) gBuf = Buf # Just the last one return (Cfg_Recs)