Example #1
def PattenCut(patten:str, string:str, start:int=0, end:int=None)->iter:
    """ 标准的字符截取器,返回一个迭代对象 """
    pattenX = Compile(patten)
    coincide = pattenX.finditer(string)
    if coincide:
        for i in coincide:
            yield i.group()[start:end]
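A minimal usage sketch; it assumes that Compile in the example's module is simply an alias for re.compile:

# assumption: the module defines Compile = re.compile
for piece in PattenCut(r'\d+', 'a12 b345 c6', start=0, end=2):
    print(piece)  # prints '12', '34', '6'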
Example #2
File: GHDL.py Project: krabo0om/PoC
def GHDLRunFilter(gen):
	#  Pattern                                                             Classification
	# ------------------------------------------------------------------------------------------------------
	#  <path>:<line>:<column>: <message>                                -> Severity.Error (by (*))
	#  <path>:<line>:<column>:<severity>: <message>                     -> According to <severity>
	#  <path>:<line>:<column>:@<time>:(report <severity>): <message>    -> According to <severity>
	#  others                                                           -> Severity.Normal
	#  (*) -> unknown <severity>                                        -> Severity.Error

	filterPattern = r".+?:\d+:\d+:((?P<report>@\w+:\((?:report|assertion) )?(?P<severity>\w+)(?(report)\)):)? (?P<message>.*)"
	filterRegExp = RegExpCompile(filterPattern)

	lineno = 0
	for line in gen:
		if (lineno < 2):
			lineno += 1
			if ("Linking in memory" in line):
				yield LogEntry(line, Severity.Verbose)
				continue
			if ("Starting simulation" in line):
				yield LogEntry(line, Severity.Verbose)
				continue

		filterMatch = filterRegExp.match(line)
		if filterMatch is not None:
			yield LogEntry(line, Severity.ParseVHDLSeverityLevel(filterMatch.group('severity'), Severity.Error))
			continue

		yield LogEntry(line, Severity.Normal)
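For illustration, a quick check of the filter pattern on its own against a typical GHDL message line, using only the standard re module and leaving out the PoC-specific LogEntry and Severity classes:

import re

filterPattern = r".+?:\d+:\d+:((?P<report>@\w+:\((?:report|assertion) )?(?P<severity>\w+)(?(report)\)):)? (?P<message>.*)"
m = re.compile(filterPattern).match("tb_arith.vhdl:42:7:warning: signal 'clk' is never used")
print(m.group('severity'), '->', m.group('message'))  # warning -> signal 'clk' is never used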
Example #3
def loadData(
        withTopology=False,
        withCorner=False) -> Tuple[Dict[str, Component], Dict[str, Compound]]:
    databasePath = _path('../data/main')
    database = connect(databasePath)
    cursor = database.cursor()
    strokeDataPattern = RE(r'(?<=\d)(?=M)')
    COMPONENTS = {}
    for row in cursor.execute(
            'SELECT name, gb, pinyin, feature, svg FROM main WHERE operator IS NULL;'
    ):
        name, inGB, pinyinString, featureString, svgString = row
        pinyinList = [] if pinyinString is None else pinyinString.split(',')
        featureList = featureString.split(',')
        svgList = strokeDataPattern.split(svgString)
        strokeList = [
            Stroke(feature, svg) for feature, svg in zip(featureList, svgList)
        ]
        COMPONENTS[name] = Component(name,
                                     strokeList,
                                     None,
                                     inGB=inGB,
                                     pinyinList=pinyinList)
    if withTopology:
        topologyPath = _path('../data/topology')
        if not exists(topologyPath): buildTopology(COMPONENTS, topologyPath)
        with open(topologyPath, 'rb') as f:
            TOPOLOGIES = load(f)
        for name, component in COMPONENTS.items():
            component.topologyMatrix = TOPOLOGIES[name]
    if withCorner:
        cornerPath = _path('../data/corner')
        if not exists(cornerPath): buildCorner(COMPONENTS, cornerPath)
        with open(cornerPath, 'rb') as f:
            CORNERS = load(f)
        for name, component in COMPONENTS.items():
            component.corner = CORNERS[name]
    COMPOUNDS = {}
    compoundData = cursor.execute(
        'SELECT name, gb, pinyin, operator, first, second, mix FROM main WHERE operator IS NOT NULL;'
    ).fetchall()
    while compoundData:
        row = compoundData.pop(0)
        name, inGB, pinyinString, operator, firstChildName, secondChildName, mix = row
        pinyinList = [] if pinyinString is None else pinyinString.split(',')
        firstChild = COMPONENTS.get(firstChildName,
                                    COMPOUNDS.get(firstChildName))
        secondChild = COMPONENTS.get(secondChildName,
                                     COMPOUNDS.get(secondChildName))
        if firstChild and secondChild:
            COMPOUNDS[name] = Compound(name,
                                       operator,
                                       firstChild,
                                       secondChild,
                                       mix,
                                       inGB=inGB,
                                       pinyinList=pinyinList)
        else:
            compoundData.append(row)
    return COMPONENTS, COMPOUNDS
Example #4
	def _DecodeAltera(self, deviceString):
		self.__vendor = Vendors.Altera

		deviceRegExpStr  = r"(?P<gen>\d{1,2})"  # generation
		deviceRegExpStr += r"(?P<fam>[acms])"  # family
		deviceRegExpStr += r"(?P<st>(ls|e|g|x|t|gs|gx|gt|gz|sx|st)?)"  # subtype
		deviceRegExp = RegExpCompile(deviceRegExpStr)
		deviceRegExpMatch = deviceRegExp.match(deviceString[2:].lower())

		if (deviceRegExpMatch is not None):
			self.__generation = int(deviceRegExpMatch.group('gen'))

			family = deviceRegExpMatch.group('fam')
			for fam in AlteraFamilies:
				if fam.Token == family:
					self.__family = fam
					break
			else:
				raise ConfigurationException("Unknown Altera device family.")

			subtype = deviceRegExpMatch.group('st')
			if (subtype != ""):
				d = {"g": "gx", "x": "sx", "t": "gt"} # re-name for Stratix 10 and Arria 10
				if subtype in d: subtype = d[subtype]
				self.__subtype = SubTypes[subtype.upper()]
			else:
				self.__subtype = SubTypes.NoSubType

		else:
			raise ConfigurationException("RegExp mismatch.")
Example #5
	def __GetQuestaSimVersion(self, binPath):
		if (self._host.Platform == "Windows"):
			vsimPath = binPath / "vsim.exe"
		else:
			vsimPath = binPath / "vsim"

		if not vsimPath.exists():
			raise ConfigurationException("Executable '{0!s}' not found.".format(vsimPath)) from FileNotFoundError(
				str(vsimPath))

		# get version and backend
		try:
			output = check_output([str(vsimPath), "-version"], universal_newlines=True)
		except OSError as ex:
			raise ConfigurationException("Error while accessing '{0!s}'.".format(vsimPath)) from ex

		version = None
		versionRegExpStr = r"^.* vsim (.+?) "
		versionRegExp = RegExpCompile(versionRegExpStr)
		for line in output.split('\n'):
			if version is None:
				match = versionRegExp.match(line)
				if match is not None:
					version = match.group(1)

		self._host.PoCConfig[self._section]['Version'] = version
Example #6
File: GHDL.py Project: krabo0om/PoC
	def __WriteGHDLSection(self, binPath):
		if (self._host.Platform == "Windows"):
			ghdlPath = binPath / "ghdl.exe"
		else:
			ghdlPath = binPath / "ghdl"

		if not ghdlPath.exists():
			raise ConfigurationException("Executable '{0!s}' not found.".format(ghdlPath)) from FileNotFoundError(
				str(ghdlPath))

		# get version and backend
		output = check_output([str(ghdlPath), "-v"], universal_newlines=True)
		version = None
		backend = None
		versionRegExpStr = r"^GHDL (.+?) "
		versionRegExp = RegExpCompile(versionRegExpStr)
		backendRegExpStr = r"(?i).*(mcode|gcc|llvm).* code generator"
		backendRegExp = RegExpCompile(backendRegExpStr)
		for line in output.split('\n'):
			if version is None:
				match = versionRegExp.match(line)
				if match is not None:
					version = match.group(1)

			if backend is None:
				match = backendRegExp.match(line)
				if match is not None:
					backend = match.group(1).lower()

		self._host.PoCConfig[self._section]['Version'] = '<unknown>' if version is None else version
		self._host.PoCConfig[self._section]['Backend'] = '<unknown>' if backend is None else backend
Example #7
	def _DecodeXilinx(self, deviceString):
		self.__vendor = Vendors.Xilinx
		self.__generation = int(deviceString[2:3])

		familyToken = deviceString[3:4].lower()
		for fam in XilinxFamilies:
			if fam.Token == familyToken:
				self.__family = fam
				break
		else:
			raise ConfigurationException("Unknown Xilinx device family.")

		deviceRegExpStr =  r"(?P<st1>[a-z]{0,2})"   # device subtype - part 1
		deviceRegExpStr += r"(?P<no>\d{1,4})"       # device number
		deviceRegExpStr += r"(?P<st2>[t]{0,1})"     # device subtype - part 2
		deviceRegExpStr += r"(?P<sg>[-1-5]{2})"     # speed grade
		deviceRegExpStr += r"(?P<pack>[a-z]{1,3})"  # package
		deviceRegExpStr += r"(?P<pins>\d{1,4})"     # pin count
		deviceRegExp = RegExpCompile(deviceRegExpStr)
		deviceRegExpMatch = deviceRegExp.match(deviceString[4:].lower())

		if (deviceRegExpMatch is not None):
			subtype = deviceRegExpMatch.group('st1') + deviceRegExpMatch.group('st2')
			package = deviceRegExpMatch.group('pack')

			if (subtype != ""):    self.__subtype = SubTypes[subtype.upper()]
			else:                  self.__subtype = SubTypes.NoSubType

			self.__number =      int(deviceRegExpMatch.group('no'))
			self.__speedGrade =  int(deviceRegExpMatch.group('sg'))
			self.__package =    Packages[package.upper()]
			self.__pinCount =    int(deviceRegExpMatch.group('pins'))
		else:
			raise ConfigurationException("RegExp mismatch.")
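For illustration, the same device regular expression applied to a common Artix-7 part number; the part string used here is an assumption for the demo, not something taken from the code above:

import re

pat = re.compile(r"(?P<st1>[a-z]{0,2})"
                 r"(?P<no>\d{1,4})"
                 r"(?P<st2>[t]{0,1})"
                 r"(?P<sg>[-1-5]{2})"
                 r"(?P<pack>[a-z]{1,3})"
                 r"(?P<pins>\d{1,4})")
m = pat.match("xc7a100t-1csg324"[4:].lower())
print(m.group('no'), m.group('st1') + m.group('st2'),
      m.group('sg'), m.group('pack'), m.group('pins'))  # 100 t -1 csg 324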
Example #8
def rotate(P, W, angle, xOffset, yOffset, line):
    REGCODE = COMPILE('([a-z]-?[0-9]+\.?([0-9]+)?)|\(.*\)')
    inLine = line
    comment = ''
    i = inLine.find('(')
    if i >= 0:
        comment = inLine[i:]
        inLine = inLine[:i - 1].strip()
    if len(inLine) > 0:
        parts = list([list(cmd)[0] for cmd in REGCODE.findall(inLine)])
        if len(parts) <= 1 or parts[0] not in ['g0', 'g1', 'g2', 'g3', 'x', 'y'] or \
           inLine.replace(' ','').startswith('g0z[#<_ini[axis_z]max_limit>-'):
            return '{}\n'.format(line)
        angle = math.radians(angle)
        params = {
            'x': 0.0,
            'y': 0.0,
            'r': 0.0,
            'i': 0.0,
            'j': 0.0,
        }
        used = ''
        for p in parts:
            for n in 'xyrij':
                if n in p:
                    if n == 'x':
                        params['x'] = float(p.strip(n))
                        used += 'x'
                    elif n == 'y':
                        params['y'] = float(p.strip(n))
                        used += 'y'
                    elif n == 'r':
                        params['r'] = float(p.strip(n))
                        used += 'r'
                    elif n == 'i':
                        params['i'] = float(p.strip(n))
                        used += 'i'
                    elif n == 'j':
                        params['j'] = float(p.strip(n))
                        used += 'j'
        newLine = ('{}'.format(parts[0]))
        if 'x' in used:
            newLine += (' x{:.6f}'.format(params['x'] * math.cos(angle) -
                                          params['y'] * math.sin(angle) +
                                          xOffset))
        if 'y' in used:
            newLine += (' y{:.6f}'.format(params['y'] * math.cos(angle) +
                                          params['x'] * math.sin(angle) +
                                          yOffset))
        if 'r' in used:
            newLine += (' r{:.6f}'.format(params['r']))
        if 'i' in used:
            newLine += (' i{:.6f}'.format(params['i'] * math.cos(angle) -
                                          params['j'] * math.sin(angle)))
        if 'j' in used:
            newLine += (' j{:.6f}'.format(params['j'] * math.cos(angle) +
                                          params['i'] * math.sin(angle)))
        return ('{}\n'.format(newLine))
Example #9
def remove_compiled_regex(txt: str,
                          compiled_regex: re.compile,
                          substitute: str = ""):
    """
    Search for the compiled regex in the txt and either replace it with the substitute or remove it
    """
    entities = compiled_regex.findall(txt)
    txt = compiled_regex.sub(substitute, txt)
    return txt, entities
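A small usage sketch with a hypothetical hashtag pattern, using only the standard re module:

import re

hashtag = re.compile(r"#\w+")
cleaned, found = remove_compiled_regex("fix bug #42 and #43", hashtag)
print(cleaned)  # 'fix bug  and '
print(found)    # ['#42', '#43']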
Example #10
def rotate(P, W, angle, xOffset, yOffset, line):
    REGCODE = COMPILE('([a-z]-?[0-9]+\.?([0-9]+)?)|\(.*\)')
    inLine = line.strip()
    comment = ''
    i = inLine.find('(')
    if i >= 0:
        comment = inLine[i:]
        inLine = inLine[:i - 1].strip()
    if len(inLine) > 0:
        parts = list([list(cmd)[0] for cmd in REGCODE.findall(inLine)])
        if len(parts) == 0 or parts[0] not in ['g0', 'g1', 'g2', 'g3']:
            return line
        angle = math.radians(angle)
        params = {
            'x': 0.0,
            'y': 0.0,
            'i': 0.0,
            'j': 0.0,
        }
        used = ''
        for p in parts:
            for n in 'xyij':
                if n in p:
                    if n == 'x':
                        params['x'] = float(p.strip(n))
                        used += 'x'
                    elif n == 'y':
                        params['y'] = float(p.strip(n))
                        used += 'y'
                    elif n == 'i':
                        params['i'] = float(p.strip(n))
                        used += 'i'
                    elif n == 'j':
                        params['j'] = float(p.strip(n))
                        used += 'j'
        newLine = ('{}'.format(parts[0]))
        if not 'x' in used and not 'y' in used:
            P.dialogError = True
            P.dialog_error('ROTATE', 'Cannot decipher G-Code correctly')
            return None
        if 'x' in used:
            newLine += (' x{:.6f}'.format(params['x'] * math.cos(angle) -
                                          params['y'] * math.sin(angle) +
                                          xOffset))
        if 'y' in used:
            newLine += (' y{:.6f}'.format(params['y'] * math.cos(angle) +
                                          params['x'] * math.sin(angle) +
                                          yOffset))
        if parts[0] in {'g2', 'g3'}:
            newLine += (' i{:.6f}'.format(params['i'] * math.cos(angle) -
                                          params['j'] * math.sin(angle)))
            newLine += (' j{:.6f}'.format(params['j'] * math.cos(angle) +
                                          params['i'] * math.sin(angle)))
        return ('{}\n'.format(newLine))
Example #11
def PattenCut(patten, string, start=0, end=None) -> list:
    """ 按照模式patten对目标字符串截取匹配部分coincidence,
    返回coincidence[start:end],若没有匹配则返回None """
    patten1 = Compile(patten)
    coincide = patten1.finditer(string)
    if coincide:
        cc = []
        for i in coincide:
            cc.append(i.group()[start:end])
        return cc
    else:
        raise PattenError()
Example #12
def re_variable(x):
    r"""
    >>> v('{word}')
    '(?P<word>[a-zA-Z][a-zA-Z_0-9]*)'
    >>> v('{word:int}')
    '(?P<word>\\d+)'
    """
    R = Re('{([^:}]+)(:[^:}]+)?}', re.I)
    assert R.fullmatch(x), f'{x!r} does not match {R.pattern!r}'
    a, b = R.fullmatch(x).groups()
    r = ('[a-zA-Z][a-zA-Z_0-9]*' if not b or b.lstrip(':') == 'str' else
         '\d+' if b.lstrip(':') == 'int' else '[a-zA-Z/][a-zA-Z_0-9/]*'
         if b.lstrip(':') == 'url' else throw(Exception))
    return f'(?P<{a}>{r})'
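A hedged sketch of composing the generated fragments into a full route pattern; re_variable relies on Re and throw from its own module, and the route shape and names below are illustrative assumptions:

import re

route = re.compile('/users/' + re_variable('{name}') +
                   '/posts/' + re_variable('{post_id:int}'))
m = route.fullmatch('/users/alice/posts/42')
print(m.group('name'), m.group('post_id'))  # alice 42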
Example #13
def load(string):
    """Attempt to parse HyTek race results into a Python dictionary.  Because
    some race results may have different column orders, add new ones, or leave
    standard columns out, this may fail.  If the required information cannot be
    extracted, a LoadError is raised."""
    #Building block patterns
    first_name = "[A-Z]\w*"
    last_name= "[A-Z](\w|')*([ -](\w|')+)?"
    last_first = last_name + ", " + first_name
    first_last = first_name + " " + last_name
    freshman = "F[rR]"
    sophomore = "S[oOpP]"
    junior = "J[rR]"
    senior = "S[rR]"
    year = "|".join([freshman, sophomore, junior, senior])
    #Create a dictionary of the patterns
    field_order = ("place", "bib", "name", "year", "team", "time", "points")
    patterns = dict(place="\d+", bib="#?\d+", year=r"\b(" + year + r")\b",
                    name=last_first + "|" + first_last, team="[A-Z]\D*",
                    time="\d+:\d\d(\.\d{1,2})", points=r"\d+")
    #Apply names to all the patterns
    for field, pattern in patterns.iteritems():
        patterns[field] = "(?P<%s>%s)" % (field, pattern)
        #These fields are optional
        if field in ("bib", "year", "points"):
            patterns[field] += "?"
    row = "\s*".join(patterns[field] for field in field_order)
    pattern = Regex(row)
    #Apply regular expressions
    cleanup = {"place": int, "team": str.strip, "points": int, "time":
               RaceTime.from_string}
    results = []
    for i, line in enumerate(string.splitlines()):
        if len(line) == 0 or line.isspace():
            continue
        match = pattern.search(line)
        if match is None:
            raise LoadError("Line %d: \"%s\" does not match /%s/." %
                            (i + 1, line, row))
        results.append(Finisher(None, None))
        for field in patterns.iterkeys():
            value = match.group(field)
            try:
                setattr(results[-1], field, cleanup[field](value))
            except (AttributeError, KeyError, TypeError):
                setattr(results[-1], field, value)
    return results
Example #14
def parse_dot_directory(filename):
    try:
      with open(filename) as dot_directory:
        lines = [line.strip('\n') for line in dot_directory]
        Reg = Re('^\\[(.*)\\]$')
        stops = [
            i for i, line in enumerate(lines) if Reg.fullmatch(line)
        ] + [len(lines)]
        for i in range(len(stops) - 1):
            a,b = stops[i], stops[i+1]
            name = Reg.fullmatch(lines[a]).group(1)
            if name == 'rename_today.py':
                for l in lines[a+1:b]:
                    a,b = l.split('=', maxsplit=1)
                    yield a,b
    except FileNotFoundError:
      return 
Example #15
File: GHDL.py Project: krabo0om/PoC
def GHDLAnalyzeFilter(gen):
	filterPattern = r".+?:\d+:\d+:(?P<warning>warning:)? (?P<message>.*)"			# <Path>:<line>:<column>:[warning:] <message>
	filterRegExp  = RegExpCompile(filterPattern)

	for line in gen:
		filterMatch = filterRegExp.match(line)
		if (filterMatch is not None):
			if (filterMatch.group('warning') is not None):
				yield LogEntry(line, Severity.Warning)
				continue

			message = filterMatch.group('message')
			if message.endswith("has changed and must be reanalysed"):
				raise GHDLReanalyzeException(message)
			yield LogEntry(line, Severity.Error)
			continue

		yield LogEntry(line, Severity.Normal)
Example #16
def load(string, required=None, callbacks=None):
    """Parse the given string.  Raises a ParseError if any problems are
    encountered, including if any field in required is not included.  Callbacks
    is a dictionary of functions, listed by field, with the signature (mapping,
    line_number)."""
    if callbacks is None:
        callbacks = {}
    lines = string.splitlines()
    mapping = {}
    #Skip blank lines at the beginning of the file
    offset = 0
    for offset, line in enumerate(lines):
        if line and not line.isspace():
            break
    current_line, lines = lines[offset], lines[offset:]
    current_line_number = offset
    indent_size = get_indent(current_line)
    comment = Regex(r"(?<!\\)#.*$")
    line_numbers = {}
    for line_number, indented_line in enumerate_with_offset(lines, offset + 1):
        indent, line = indented_line[:indent_size], indented_line[indent_size:]
        line = comment.sub("", line)
        if indent and not indent.isspace():
            raise ParseError("Unexpected unindent.", line_number,
                             indented_line)
        if not line or line[0].isspace():
            current_line += line
        else:
            field, value = pairify(current_line, current_line_number)
            mapping[field] = value
            line_numbers[field] = current_line_number
            current_line = line
            current_line_number = line_number
    field, value = pairify(current_line, current_line_number)
    mapping[field] = value
    line_numbers[field] = current_line_number
    if required is not None:
        for field in required:
            if field not in mapping:
                raise ParseError("Required field \"%s\" missing." % field)
    for field in callbacks.iterkeys():
        line_number = line_numbers[field] if field in line_numbers else None
        callbacks[field](mapping, line_number)
    return mapping
Example #17
def search_string(regex: re.compile, input_string: str):
    """Construct a regular expression and match it in the passed string"""

    match = regex.search(input_string)

    try:
        string = match.group().strip()
        return string
    except:
        return 'None'
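A usage sketch with a hypothetical IPv4 pattern, assuming only the standard re module:

import re

ip = re.compile(r'\d{1,3}(?:\.\d{1,3}){3}')
print(search_string(ip, 'host at 192.168.0.1 responded'))  # '192.168.0.1'
print(search_string(ip, 'no address here'))                # 'None'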
Example #18
    def __init__(self,
                 str_format='%s%s',
                 re_format='(?P<Payload>.*)',
                 repr_show=None,
                 ):
        """
        repr_show list is attributes that get coughed out on print
        """
        if not repr_show: repr_show = []
        self.__str_format = str_format
        self.__re_format = REC(re_format)
        self.__repr_show = repr_show
Example #19
	def __WriteGtkWaveSection(self, binPath):
		if (self._host.Platform == "Windows"):
			gtkwPath = binPath / "gtkwave.exe"
		else:
			gtkwPath = binPath / "gtkwave"

		if not gtkwPath.exists():
			raise ConfigurationException("Executable '{0!s}' not found.".format(gtkwPath)) from FileNotFoundError(
				str(gtkwPath))

		# get version and backend
		output = check_output([str(gtkwPath), "--version"], universal_newlines=True)
		version = None
		versionRegExpStr = r"^GTKWave Analyzer v(.+?) "
		versionRegExp = RegExpCompile(versionRegExpStr)
		for line in output.split('\n'):
			if version is None:
				match = versionRegExp.match(line)
				if match is not None:
					version = match.group(1)

		self._host.PoCConfig[self._section]['Version'] = version
Example #20
 def __init__(
     self,
     str_format='%s%s',
     re_format='(?P<Payload>.*)',
     repr_show=None,
 ):
     """
     repr_show list is attributes that get coughed out on print
     """
     if not repr_show: repr_show = []
     self.__str_format = str_format
     self.__re_format = REC(re_format)
     self.__repr_show = repr_show
Example #21
def transform(string, year):
    """Where possible, replace all runner's with their database ID numbers.
    May raise a key error."""
    result_list = load(string)
    schools = [(school, Regex("|".join(school.names())))
               for school in Schools.select()]
    for (school, pattern) in schools:
        for item in result_list:
            if pattern.match(item["school"]):
                item["school_id"] = school.id
                del item["school"]
    join = INNERJOINOn(Runners, Affiliations,
                       Runners.q.id == Affiliations.q.runner)
    for runner in Runners.select(Affiliations.q.year == year, join=join):
        for name in runner.given_names:
            last_first = r"%s,\s*%s" % (runner.surname, name)
            first_last = r"%s\s*%s" % (name, runner.surname)
            pattern = Regex(last_first + "|" + first_last, IGNORECASE)
            for item in result_list:
                if pattern.match(item["name"]):
                    item["runner_id"] = runner.id
                    del item["name"]
                    del item["school_id"]
    return dump(result_list)
Example #22
def find_matching_location(event: dict, filename):
    if filename not in _ics_cache:
        _ics_cache[filename] = list(parse_ics_vevent(filename))
    event_list = _ics_cache[filename]

    a = event
    gt, ga = Re('(InGe|IrBi|IrCi|IrAr)(\d*)',
                re.I).search(a['SUMMARY']).groups()

    LT = [
        b for b in event_list for _ in [b['DTSTART;TZID=Europe/Brussels']]
        for _ in [datetime.strptime(_, '%Y%m%dT%H%M%S')]
        for _ in [naive_to_utc(_)] for _ in [_.strftime('%Y%m%dT%H%M%S')]
        if _ == a['DTSTART'][:-1]
    ]

    L = [
        b for b in LT for ddict in [event_desc_dict(b)] for m in [
            Re('(INGE|IRBI|IRAR|IRCI) - (?:groupe |gr.)(\d+)', re.I).search(
                b['DESCRIPTION'])
        ] if m for ogt, oga in [m.groups()]
        if int(ga) == int(oga) and gt.lower() == ogt.lower()
    ] if gt.lower() != 'irar' else [
        b for b in LT for ddict in [event_desc_dict(b)]
        if Re('irar', re.I).search(b['DESCRIPTION'])
        and not Re('Théorie', re.I).search(b['DESCRIPTION'])
    ]
    # TODO: use event_groups(b) :  "B1-IRBI - gr.5" "B1-INGE - groupe 04" "B1-IRAR"
    # TODO: use event_acti(b)

    if len(L) == 1:
        return L[0]['LOCATION']
    elif len(L) > 1:
        return 'Gehol Too Much {}'.format(' '.join(l['LOCATION'] for l in L))
    elif len(L) == 0:
        return 'Gehol Not Found'
Example #23
class b64Formatter(object):
    def __init__(self,
                 str_format='%s%s',
                 re_format='(?P<Payload>.*)',
                 repr_show=None,
                 ):
        """
        repr_show list is attributes that get coughed out on print
        """
        if not repr_show: repr_show = []
        self.__str_format = str_format
        self.__re_format = REC(re_format)
        self.__repr_show = repr_show


    def __fold_to_width(self, fold='',
                        width=64):
        return '\n'.join(
            [ fold[i:i+width] for i in xrange(0, len(fold), width) ]
            )


    def __repr__(self):
        """
        preface fields in the repr_show list
        with 'NOHEX_' to get the raw value
        rather than the hexlified value
        """
        ret = u'[%s]\n' % type(self).__name__
        for K in self.__repr_show:
            if not hasattr(self, K): continue
            if K[:6] == 'NOHEX_' and getattr(self, K[6:]) is not None:
                ret += '%s: %s\n' % (K[6:], getattr(self, K[6:]))
            elif getattr(self, K) is not None:
                ret += '%s: %s\n' % (K, binascii.hexlify(getattr(self, K)))
        return ret


    def deserialize(self, msg=''):
        """
        Message() format:
          b2a/json -> [header|payload]/b2a -> encryption -> json/b2a

        EncryptedHandshake() format:
          b2a -> encryption -> json/b2a
        """
        ParseMe = msg
        Format = self.__re_format.search(msg)
        if Format: ParseMe = Format.group('Payload')
        try: return json.loads(
                binascii.a2b_base64(
                        ParseMe))
        except ValueError:  pass
        except binascii.Error: pass
        try: return json.loads(
            ParseMe)
        except ValueError: pass
        try: return binascii.a2b_base64(
            ParseMe)
        except:
            raise(Broken_Format(
                    'For the life of me, I cannot parse this %s' % type(self).__name__))


    def serialize(self,
                  Payload=None,
                  b64=True,
                  Format=True,):
        """
        Message()s may be pyaxo-compatible
        but there isn't a Handshake() spec for pyaxo
        so again, pushing actual dumps() to subclass
        """
        if not Payload: Payload = {}
        sPayload = Payload
        if type(Payload) is DictType:
            sPayload = json.dumps(Payload)
        if not b64: return sPayload
        bPayload = binascii.b2a_base64(sPayload)
        if not Format: return bPayload
        else:
            fPayload = self.__fold_to_width(bPayload)
            return self.__str_format % (TTS.LOOKINGGLASS_VERSION_STRING, fPayload) 


    def to_b64(self, Payload=None):
        """
        fields in the payload prefaced with 'b64_'
        are b64 encoded

        special key 'nonce' is filled with random bytes
        up to the amount specified in the 'nonce' field

        everything else goes straight through
        """
        if not Payload: Payload = {}
        bPayload = {}
        for Key, Value in Payload.items():
            if Key[:4] == 'b64_':
                bPayload[Key] = binascii.b2a_base64(
                    hulk_smash_unicode(Value)
                    ).strip()
            elif Key == 'nonce':
                bPayload[Key] = binascii.b2a_base64(
                    urandom(randrange(Value))).strip()
            else:
                bPayload[Key] = hulk_smash_unicode(Value)
        return bPayload


    def from_b64(self, bPayload=None):
        """
        drops nonce values
        
        decodes fields prefaced by 'b64_' and
        strips that identifier
        """
        if type(bPayload) is not DictType:
                return None
        Payload = {}
        for Key, Value in bPayload.items():
            if Key[:4] == 'b64_':
                Payload[Key[4:]] = binascii.a2b_base64(Value)
            elif Key == 'nonce':
                continue
            else:
                Payload[Key] = Value
        return Payload
Example #24
class b64Formatter(object):
    def __init__(
        self,
        str_format='%s%s',
        re_format='(?P<Payload>.*)',
        repr_show=None,
    ):
        """
        repr_show list is attributes that get coughed out on print
        """
        if not repr_show: repr_show = []
        self.__str_format = str_format
        self.__re_format = REC(re_format)
        self.__repr_show = repr_show

    def __fold_to_width(self, fold='', width=64):
        return '\n'.join(
            [fold[i:i + width] for i in xrange(0, len(fold), width)])

    def __repr__(self):
        """
        preface fields in the repr_show list
        with 'NOHEX_' to get the raw value
        rather than the hexlified value
        """
        ret = u'[%s]\n' % type(self).__name__
        for K in self.__repr_show:
            if not hasattr(self, K): continue
            if K[:6] == 'NOHEX_' and getattr(self, K[6:]) is not None:
                ret += '%s: %s\n' % (K[6:], getattr(self, K[6:]))
            elif getattr(self, K) is not None:
                ret += '%s: %s\n' % (K, binascii.hexlify(getattr(self, K)))
        return ret

    def deserialize(self, msg=''):
        """
        Message() format:
          b2a/json -> [header|payload]/b2a -> encryption -> json/b2a

        EncryptedHandshake() format:
          b2a -> encryption -> json/b2a
        """
        ParseMe = msg
        Format = self.__re_format.search(msg)
        if Format: ParseMe = Format.group('Payload')
        try:
            return json.loads(binascii.a2b_base64(ParseMe))
        except ValueError:
            pass
        except binascii.Error:
            pass
        try:
            return json.loads(ParseMe)
        except ValueError:
            pass
        try:
            return binascii.a2b_base64(ParseMe)
        except:
            raise (Broken_Format('For the life of me, I cannot parse this %s' %
                                 type(self).__name__))

    def serialize(
        self,
        Payload=None,
        b64=True,
        Format=True,
    ):
        """
        Message()s may be pyaxo-compatible
        but there isn't a Handshake() spec for pyaxo
        so again, pushing actual dumps() to subclass
        """
        if not Payload: Payload = {}
        sPayload = Payload
        if type(Payload) is DictType:
            sPayload = json.dumps(Payload)
        if not b64: return sPayload
        bPayload = binascii.b2a_base64(sPayload)
        if not Format: return bPayload
        else:
            fPayload = self.__fold_to_width(bPayload)
            return self.__str_format % (TTS.LOOKINGGLASS_VERSION_STRING,
                                        fPayload)

    def to_b64(self, Payload=None):
        """
        fields in the payload prefaced with 'b64_'
        are b64 encoded

        special key 'nonce' is filled with random bytes
        up to the amount specified in the 'nonce' field

        everything else goes straight through
        """
        if not Payload: Payload = {}
        bPayload = {}
        for Key, Value in Payload.items():
            if Key[:4] == 'b64_':
                bPayload[Key] = binascii.b2a_base64(
                    hulk_smash_unicode(Value)).strip()
            elif Key == 'nonce':
                bPayload[Key] = binascii.b2a_base64(urandom(
                    randrange(Value))).strip()
            else:
                bPayload[Key] = hulk_smash_unicode(Value)
        return bPayload

    def from_b64(self, bPayload=None):
        """
        drops nonce values
        
        decodes fields prefaced by 'b64_' and
        strips that identifier
        """
        if type(bPayload) is not DictType:
            return None
        Payload = {}
        for Key, Value in bPayload.items():
            if Key[:4] == 'b64_':
                Payload[Key[4:]] = binascii.a2b_base64(Value)
            elif Key == 'nonce':
                continue
            else:
                Payload[Key] = Value
        return Payload
Example #25
#! /usr/bin/env python

#  $ ./foo.py
#  BEFORE: A \\" \\\" Z
#  AFTER : A \\ \\\" Z
#  
#  BEFORE: A \\\" \\" Z
#  AFTER : A \\\" \\ Z

from re import compile as Regex

def remove_first_group(m):
	start = m.start(1) - m.start(0)
	end = m.end(1) - m.start(0)
	whole_match = m.group(0)

	return whole_match[:start] + whole_match[end:]

unescaped_doublequote = Regex(r'(?<!\\)(?:\\\\)*(")')

for test in (
		r'A \\" \\\" Z',
		r'A \\\" \\" Z',
):
	print 'BEFORE:', test
	print 'AFTER :', unescaped_doublequote.sub(remove_first_group, test)
	print


Example #26
GROUPS = GROUPS_PREFIXES

DAYS = 'lun|mar|mer|jeu|ven|sam|dim'.split('|')
DAY = Re('|'.join(DAYS))

HOUR = Re('(\d+)[h:](\d*)')
HOUR_FROM = Re('(de|from)(\d+)[h:](\d*)')
HOUR_TO = Re('(à|to)(\d+)[h:](\d*)')
DATE = Re('(\d+)/(\d+)')
WEEK_SHIFT = Re('([+-]\d*)w')

GROUP = Re('(' + '|'.join(map(re.escape, map(str, GROUPS))) + ')(\d*)', re.I)
SEANCE = Re('S({})'.format({'num': '\d+', 'letters':'\w+', 'mixed':'[\d\w]+'}[SEANCES]), re.I) if SEANCES else Re('')
ASSISTANT = Re('|'.join(map(re.escape, ASSISTANTS)), re.I)

DURATION_RE = Re('(\d+)h(\d\d)?(min)?|(\d+)min')
assert DURATION_RE.fullmatch(ALL_DURATION), f'"{ALL_DURATION}" is not like "6h" or "6h30" or "5min"'

def convert_duration(string):
    hour, minute, _, single_minute = DURATION_RE.fullmatch(string).groups()
    if single_minute:
        return timedelta(minutes=int(single_minute))
    else:
        return timedelta(hours=int(hour), minutes=int(minute or 0))

DURATION = convert_duration(ALL_DURATION)

try:
    import search_for_ics_gehol
except ImportError:
    print('[Warning] no module search_for_ics_gehol, locations will be empty')
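A quick check of convert_duration as defined above, with illustrative duration strings:

print(convert_duration('6h30'))  # 6:30:00
print(convert_duration('5min'))  # 0:05:00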
Example #27
#! /usr/bin/env python

#  $ ./foo.py
#  BEFORE: A \\" \\\" Z
#  AFTER : A \\ \\\" Z
#
#  BEFORE: A \\\" \\" Z
#  AFTER : A \\\" \\ Z

from re import compile as Regex


def remove_first_group(m):
    start = m.start(1) - m.start(0)
    end = m.end(1) - m.start(0)
    whole_match = m.group(0)

    return whole_match[:start] + whole_match[end:]


unescaped_doublequote = Regex(r'(?<!\\)(?:\\\\)*(")')

for test in (
        r'A \\" \\\" Z',
        r'A \\\" \\" Z',
):
    print 'BEFORE:', test
    print 'AFTER :', unescaped_doublequote.sub(remove_first_group, test)
    print
Example #28
 oSwap4Blanks = getReplaceManyOldWithBlanksSwapper( ( digits, uppercase, lowercase ) )
 #
 if oSwap4Blanks( sTest ) != ' ' * 82:
     #
     lProblems.append( 'getReplaceManyOldWithBlanksSwapper()' )
     #
 if ReplaceManyOldWithBlanks( sTest, ( digits, uppercase, lowercase ) ) != ' ' * 82:
     #
     lProblems.append( 'ReplaceManyOldWithBlanks()' )
     #
 if get_obsoleteGlobalReplaceWithSwapper( sTest, oSwap4Blanks ) != ' ' * 82:
     #
     lProblems.append( 'get_obsoleteGlobalReplaceWithSwapper()' )
     #
 #
 oMatch = REcompile( 'l.+e' )
 #
 if oMatch.sub( getBlanksForReMatchObj,
         'Mary had a little lamb.' ) != \
         'Mary had a        lamb.':
     #
     lProblems.append( 'getBlanksForReMatchObj()' )
     #
 #
 sWhiteChars = '\n\t\rabc\r\t\n'
 #
 if      getSpaceForWhiteAlsoStrip( sWhiteChars  ) != 'abc' or \
         getSpaceForWhiteAlsoStrip( sWhiteChars+sWhiteChars ) != 'abc abc':
     #
     lProblems.append( 'getSpaceForWhiteAlsoStrip()' )
     #
Example #29
def group_id_from_descdict(dic):
    m = Re('groupe (\d+)').search(dic['Groupes'])
    return 0 if not m else int(m.group(1))
Example #30
               help='''
    can be a string like 9h meaning "the next 09:00"
''')
a = args = parser.parse_args()

x = a.fuzzy_time

from datetime import date, time, datetime, timedelta

C = datetime.combine

N = datetime.now()

from re import compile as Re

R = Re('(\d+)[h:](\d*)')

match = R.fullmatch(x)
if not match:
    import sys
    print("Wrong match, must be", R.pattern, file=sys.stderr)
else:
    h, m = match.groups()
    h, m = int(h), int(m or '0')

    d = N.replace(hour=h, minute=m, second=0, microsecond=0)
    if d < N:
        d += timedelta(days=1)

    delta = d - N
Example #31
    def processMails(self, text, att_file):
        """
        Parse mail for display in XBMC
        """
        myemail = email.message_from_string(text)
        p = EmailParser()
        msgobj = p.parsestr(text)
        if msgobj['Subject'] is not None:
            decodefrag = decode_header(msgobj['Subject'])
            subj_fragments = []
            for s , enc in decodefrag:
                if enc:
                    s = unicode(s , enc).encode('utf8','replace')
                subj_fragments.append(s)
            subject = ''.join(subj_fragments)
        else:
            subject = None
        if msgobj['Date'] is not None:
            date = msgobj['Date']
        else:
            date = '--'
        Sujet = subject
        realname = parseaddr(msgobj.get('From'))[1]

        body = None
        html = None
        for part in msgobj.walk():
            content_disposition = part.get("Content-Disposition", None)
            prog = re.compile('attachment')
            # Retrieve the names of the attached files
            if prog.search(str(content_disposition)):
                file_att = str(content_disposition)

                pattern = Pattern(r"\"(.+)\"")
                att_file +=  str(pattern.findall(file_att))

            if part.get_content_type() == "text/plain":
                if body is None:
                    body = ""
                try :
                    # If no charset is defined
                    if (part.get_content_charset() is None):
                        body +=  part.get_payload(decode=True)
                    else:
                        body += unicode(
                           part.get_payload(decode=True),
                           part.get_content_charset(),
                           'replace'
                           ).encode('utf8','replace')
                except Exception, e:
                    body += "Erreur unicode"
                    print "BODY = %s " % body
            elif part.get_content_type() == "text/html":
                if html is None:
                    html = ""
                try :
                    unicode_coded_entities_html = unicode(BeautifulStoneSoup(html,
                            convertEntities=BeautifulStoneSoup.HTML_ENTITIES))

                    html += unicode_coded_entities_html
                    html = html2text(html)
                except Exception, e:
                    html += "Erreur unicode html"
Example #32
assert txt_file.endswith('.txt')
new_name = txt_file + '.html'

import sys, html
from re import compile as Re
import generate_utils
try:
    from generate_utils import OutFileGreen as OutFile
except ImportError:
    OutFile = open

with open(txt_file) as file:
    string = file.read()

R = Re('\d?\d:\d+\d+')
A = R.split(string)
B = R.findall(string)

if not (A and A[0].strip() == ''):
    raise ValueError("Must begin with a time")

assert len(A) == 1 + len(
    B), "the programmer did not understand re.split and re.findall"

bits = []
for i in range(len(B)):
    b, a = A[i + 1], B[i]
    x, y = a.split(':')
    x, y = int(x), int(y)
    time = x * 60 + y
Example #33
def remove_comments(lines):
    """Remove all lines containing a comment."""
    comment_line = Regex("^\s*#.*$")
    eol_comment = Regex(r"(?<!\\)#.*$")
    return [eol_comment.sub("", line) for line in lines if not comment_line.match(line)]
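A usage sketch; it assumes that Regex here is an alias for re.compile, as in the related examples:

# assumption: the module defines Regex = re.compile
lines = ['# full-line comment', 'value = 1  # trailing comment', 'plain line']
print(remove_comments(lines))  # ['value = 1  ', 'plain line']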