Example No. 1
File: items.py Project: Xion/hncli
    def from_html(story_id, tag):
        ''' Constructs the Comment from HN site markup.
        'tag' argument is a BeautifulSoup object for the
        <span> tag with class=comment.
        '''
        if not (tag.name == 'span' and 'comment' in tag['class']):
            return

        parent_tr = tag.find_parent('tr')
        head_span = parent_tr.find('span', {'class': 'comhead'})
        indent_img = parent_tr.find('img', src=regex(r'.*/images/s\.gif'))
        reply_link = parent_tr.find('a', href=regex(r'reply\?.+'))

        comment = {
            'story_id': story_id,
            'url': head_span.find('a', href=regex(r'item\?id\=\d+'))['href'],
            'author': head_span.find('a', href=regex(r'user\?id\=.+')).text,
            'text': tag.text.strip(),
            'time': list(head_span.strings)[-2].replace('|', '').strip(),
            'level': int(indent_img['width']) / 40, # magic number of pixels
            'parent': None,
            'replies': [],
            'reply_url': reply_link['href'] if reply_link else None,
        }

        url = comment['url']
        comment['id'] = int(url[url.find('=')+1:])
        return Comment(**comment)
Example No. 2
    def parse(self):

        # Do not bother parsing the setting file data if there is no data to parse.
        if self.data is not None:

            # Sift through the setting file data line-by-line.
            for line in self.data:

                # Do not parse comments.
                if not line.startswith('//'):

                    # We need to determine if we're declaring a variable.
                    tmp_decl = regex(r'(.*) \= (.*)\n', line)
                    for decl in tmp_decl:

                        # Determine if our entry is a boolean or a number.
                        value = f'{decl[1]}'

                        if decl[1] == 'TRUE':
                            value = True
                        elif decl[1] == 'FALSE':
                            value = False
                        elif is_numeric(decl[1]):
                            value = int(decl[1])

                        # Update our dictionary with a new entry.
                        self.items.update({f'{decl[0]}': value})

                    # We need to determine if we're declaring an array.
                    tmp_arr = regex(r'(.*) \=\> \[(.*)\]\n', line)
                    for arr in tmp_arr:

                        # Update our dictionary with a new entry.
                        self.items.update(
                            {f'{arr[0]}': check_array(arr[1].split(', '))})
Example No. 3
def get_next(image, query):
    try:
        sleep(settings.timeout)
        r1 = requests.get(
            f'https://chan.sankakucomplex.com/post/index.content?next={image}&tags={query}',
            headers=host_headers)

        if r1.status_code > 399:
            raise Exception(f'{r1.status_code}: {r1.reason}')

        links = regex(r'\<a href\=\"(.*?)\" onclick\=\"', r1.text)
        nextid = get_latest(query, links[-1].split('/')[-1])

        sleep(settings.timeout)
        r2 = requests.get(
            f'https://chan.sankakucomplex.com/post/index.content?next={nextid}&tags={query}',
            headers=host_headers)

        if r2.status_code > 399:
            raise Exception(f'{r2.status_code}: {r2.reason}')

        lonks = regex(r'\<a href\=\"(.*?)\" onclick\=\"', r2.text)
        for link in lonks:
            get_image(link, query)

        get_next(nextid, query)

    except Exception as ex:
        sys.stderr.write(f'get_next: {ex}\n')
Example No. 4
File: items.py Project: Xion/hncli
    def from_html(main_row, subtext_row):
        ''' Constructs Story from HN site markup elements.
        Arguments are <tr> elements obtained with BeautifulSoup.
        '''
        link = main_row.find_all('td')[2].a
        vote_td = main_row.find_all('td')[1]
        subtext = subtext_row.find('td', {'class': 'subtext'})
        comments_link = subtext.find('a', href=regex(r'item\?id\=.+'))
        not_job = bool(comments_link)

        story = {'title': link.text, 'url': link['href']}
        if not_job:
            points = cast(int, subtext.find('span', id=regex(r'score_\d+')
                                            ).text.split()[0], default=0)
            comments_count = cast(int, comments_link.text.split()[0],
                                  default=0)
            story.update({
                'author': subtext.find('a', href=regex(r'user\?id\=.+')).text,
                'points': points,
                'time': list(subtext.strings)[-2].replace('|', '').strip(),
                'comments_count': comments_count,
                'comments_url': comments_link['href'],
                'upvote_url': vote_td.find('a', id=regex(r'up_\d+'))['href'],
            })
            url = story['comments_url']
        else:
            story['time'] = subtext.text
            url = story['url']

        story['id'] = int(url[url.find('=')+1:])
        return Story(**story)
Example No. 5
def get_utilization(util_rpt):
    '''
    Extract the number of CLB LUTs and CLB registers from a Vivado utilization report.
    They indicate design complexity, and are a rough proxy for the area of an ASIC.

...
1. CLB Logic
------------

+----------------------------+--------+-------+-----------+-------+
|          Site Type         |  Used  | Fixed | Available | Util% |
+----------------------------+--------+-------+-----------+-------+
| CLB LUTs                   | 146578 |     0 |   1182240 | 12.40 |
                               ^^^^^^
|   LUT as Logic             | 141704 |     0 |   1182240 | 11.99 |
|   LUT as Memory            |   4874 |     0 |    591840 |  0.82 |
|     LUT as Distributed RAM |   3524 |     0 |           |       |
|     LUT as Shift Register  |   1350 |     0 |           |       |
| CLB Registers              | 121495 |     2 |   2364480 |  5.14 |
...                            ^^^^^^
                               these two numbers
    '''
    lut_pat = regex(r'^\| CLB LUTs +\| +(\d+) \|')
    reg_pat = regex(r'^\| CLB Registers +\| +(\d+) \|')
    found = find(util_rpt, lut_pat, reg_pat)
    if found:
        return {"CLB_LUTs": int(found[0]), "CLB_regs": int(found[1])}
    else:
        raise NotFound('CLB LUTs and Registers in {}'.format(util_rpt))
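As a quick, self-contained sanity check (assuming `regex` is `re.compile`, as in the other examples), the LUT pattern above matches the report line quoted in the docstring:

from re import compile as regex

# One line from the utilization report excerpt above.
lut_pat = regex(r'^\| CLB LUTs +\| +(\d+) \|')
line = '| CLB LUTs                   | 146578 |     0 |   1182240 | 12.40 |'
print(int(lut_pat.match(line).group(1)))  # 146578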
Example No. 6
def post_processing(wb):
    """
        Work order pre-processing
        Meant to be run after operations are entered

        Order of operations:
            1) Save pre-subtotalled document
            2) Fill out CDS(TagSchedule)
            3) Fill out ProductionDemand spreadsheet
            4) Import WBS-split data
    """

    sheet = wb.sheets[0]

    # get header column IDs
    header = HeaderParser()
    header.add_header_aliases(HEADER_ALIASES)

    job = sheet.range(2, header.job).value
    shipment = sheet.range(2, header.shipment).value

    # open documents for writing
    workorder = WorkOrder(job, shipment)
    ts = TagSchedule(job, shipment)
    demand = ProductionDemand()
    sndb = SNDB()

    # matching regexes
    WEB_RE = regex(r"\w+-[Ww]\w+")
    FLG_RE = regex(r"\w+-[TtBb]\w+")
    PART_RE = regex(r"\A(PL|SHT|SHEET|MISC)")

    i = 2
    while sheet.range(i, 1).value:
        row = header.parse_row(sheet.range(i, 1).expand('right').value)

        # 1) Save pre-subtotalled document
        workorder.add_row(row)

        # 2) Fill out CDS(TagSchedule)
        if WEB_RE.match(row.mark):
            ts.webs.add(row)
        elif FLG_RE.match(row.mark):
            ts.flange.add(row)
        elif PART_RE.match(row.part_size):
            if row.remark and row.remark != 'Blank':
                ts.code_delivery.add(row)
        else:
            # shape items: do not push to SigmaNest
            i += 1  # still advance to the next row, otherwise the loop never terminates
            continue

        # 3) Fill out ProductionDemand spreadsheet
        demand.add(row)

        # 4) Import WBS-split data
        sndb.updateWbsPartMapping(row)

        i += 1
Example No. 7
 def __init__(self, name, pathPatterns, filePatterns, all_conditions = False):
   self.name = name
   self.allcond = all_conditions
   self.fpat = []
   self.ppat = []
   for pat in filePatterns:
     self.fpat.append(regex(pat))
   for pat in pathPatterns:
     self.ppat.append(regex(pat))
Example No. 8
 def __init__(self, name, pathPatterns, filePatterns, all_conditions=False):
     self.name = name
     self.allcond = all_conditions
     self.fpat = []
     self.ppat = []
     for pat in filePatterns:
         self.fpat.append(regex(pat))
     for pat in pathPatterns:
         self.ppat.append(regex(pat))
Example No. 9
def __modifyfile(filename, tmpfilename, processline_Fn):
    try:
        origf = open(filename, "r")
        newf = open(tmpfilename, "w+")

        persistentdata = {}
        while True:
            line = origf.readline()
            if len(line) == 0:  # EOF
                processline_Fn(line, newf, persistentdata)
                break
            else:
                processline_Fn(line, newf, persistentdata)

    except StopIteration:
        print("Verified File: {}".format(filename))
        newf.close()
        newf = None
        del_file(tmpfilename)
        return ()
    except Exception as err:
        eprint(err)
        newf.close()
        newf = None
        del_file(tmpfilename)  # delete tmp file
    finally:
        origf.close()
        if newf is not None:
            newf.close()

    pathsplitter = regex(r'\\')
    pathmapper = regex(r'^C:/(.*)$')
    cygwin_filename = pathmapper.sub(  # swap filename to path from inside cygwin
        '/cygdrive/c/\\1', '/'.join(pathsplitter.split(tmpfilename)))
    try:
        # convert file type with dos2unix
        exitcode = __runcygcmd(
            "/usr/bin/dos2unix.exe {file}".format(file=cygwin_filename))
        if exitcode != 0:
            raise (Exception("Failed to convert file to unix file."))

        # replace original file
        mv(tmpfilename, filename)

    except Exception as err:
        eprint(err)
        raise (err)
    else:
        print("Modified: {}".format(filename))
def check_prereqs():
    '''
    Validate environment & dependencies exist for script
    '''
    error_count = 0
    print("Checking script environment...")
    compatible_os = regex(r'Windows')
    if not compatible_os.match(get_os()):
        eprint(
            "FAILED: This script only works on Windows with administrator privileges."
        )
        error_count -= 1
    else:
        try:
            # Check running as administrator (only Administrator rights can see the temp dir)
            ls(p.join(ossep, 'Windows', 'temp'))

        except PermissionError:
            eprint(
                '\n' +
                "FAILED: Script needs to be run with administrator privileges."
            )
            eprint(
                "PowerShell w/ Admin Privalages command: \n\t Start-Process powershell -Verb RunAs"
            )
            error_count -= 1

    # TODO: check for required cygwin_pubring.asc public key file

    # VALID Environment
    if error_count != 0:
        return (error_count)
    else:
        print("PASS")
        return (0)
Example No. 11
def get_filtered_file(
        filename: Text,
        stop_words: Optional[Container[Text]] = None) -> Tokens_str:
    """
    Get the filtered version of a single file.

    Parameters
    ----------
    filename: Text
        The name of the file we are reading from

    stop_words: Optional[Container[Text]] :
        Path of file consisting of extra stopwords to consider
        (Default value = None)

    Returns
    -------
    Tokens_str
        The string version of all the valid tokens in the file.

    """
    from re import compile as regex

    ws_filter = regex(r"\s+")
    with open(filename, 'rb') as f:
        decoded_str = f.read().decode(errors="ignore").strip().lower()
        return filter_tokens(ws_filter.split(decoded_str), stop_words)

    raise ValueError("Invalid File name!")
Example No. 12
 def command(self, command='', flush=True, attempts=5):
     event = self.db.log_event('Information',
                               'DUT' if not self.aux else 'AUX',
                               'Command',
                               command,
                               success=False)
     for attempt in range(attempts):
         self.write('{}\n'.format(command))
         buff, returned = self.read_until(flush=flush)
         if command and command not in buff and not regex(  # sorry not sorry
                 escape('_____'.join(command)).replace('_____', '.*'),
                 DOTALL).search(buff):
             if attempt < attempts - 1:
                 self.db.log_event('Warning',
                                   'DUT' if not self.aux else 'AUX',
                                   'Command error', buff)
                 sleep(5)
             else:
                 raise DrSEUsError('{} Command error'.format(
                     'DUT' if not self.aux else 'AUX'),
                                   returned=returned)
         else:
             event.success = True
             event.timestamp = datetime.now()
             event.save()
             break
     return buff, returned
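A minimal sketch of the pattern-building trick flagged "sorry not sorry" above: every character of the command is allowed to be separated by arbitrary echoed bytes in the captured buffer. The command and buffer below are made up.

from re import compile as regex, escape, DOTALL

command = 'uname -a'
# Join the characters with a placeholder, escape, then let anything appear between them.
pattern = regex(escape('_____'.join(command)).replace('_____', '.*'), DOTALL)
print(bool(pattern.search('u\r\nname  -a\r\n$ ')))  # True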
Example No. 13
    def load_from_stream(cls, stream: with_attr("readline"), bol_b: bytes, eol_b: bytes):

        params = {}

        def decode_line(b):
            key, value = b.split(b"=", 1)
            key, value = key.decode("ascii"), a2b_base64(value).decode("utf-8")
            params[key] = value

        # the output lines may contain garbage emitted by any java
        # library and they are therefore filtered and checksummed

        valid_line = regex(b"^.*(?:" + bol_b + b"|" + cls._bol_b + b")" +
                           b"([0-9A-F]{8})( ?[A-Za-z0-9+/=_]*)" +
                           b"(?:" + eol_b + b"|" + cls._eol_b + b").*$")

        def get_next_line(prev_crc32 = 0):
            while True:
                b = stream.readline()
                if not b:
                    return None
                b = b.rstrip()
                if not b:
                    continue
                valid_parts = valid_line.findall(b)
                if len(valid_parts) != 1:
                    pmnc.log.warning("skipping unexpected output: {0:s}".format(str(b)[2:-1]))
                    continue
                next_crc32, bb = int(valid_parts[0][0], 16), valid_parts[0][1]
                if next_crc32 != crc32(bb, prev_crc32):
                    pmnc.log.warning("skipping broken output: {0:s}".format(str(b)[2:-1]))
                    continue
                return bb, next_crc32

        curr_lines = []
        next_line_crc32 = get_next_line()
        if next_line_crc32 is None:
            return None

        next_line, curr_crc32 = next_line_crc32
        while next_line:
            if next_line.startswith(b" "):
                if curr_lines:
                    curr_lines.append(next_line[1:])
                else:
                    raise Exception("invalid folding")
            else:
                if curr_lines:
                    decode_line(b"".join(curr_lines))
                    del curr_lines[:]
                curr_lines.append(next_line)
            next_line_crc32 = get_next_line(curr_crc32)
            if next_line_crc32 is None:
                raise Exception("unexpected eof")
            next_line, curr_crc32 = next_line_crc32

        if curr_lines:
            decode_line(b"".join(curr_lines))

        return cls(**params)
Example No. 14
 def test0(self):
     r'''
     `_feed` and `_expected_expected` have been manually prepared by reading
     `_tree_template`. `expected` is programmatically prepared from
     _tree_template.
     '''
     from ... import TestTree
     from .....util import name_shift
     from .....lib import SimplestFindBy, Str, StringIO
     from re import compile as regex, escape
     b = StringIO()
     _tree_template.feed(b, 'p1.py\n', '', '0')
     _tree_template.feed(b, 'p1+/\n', '', '1')
     feed = b.getvalue()
     expected = _tree_template.expected()
     expected = tuple(expected)
     abs_dir = join(self.tmp, 'a')
     content = _content_feed
     abs_dir = TestTree(abs_dir, feed, _content_feed)
     replace, actual = name_shift(abs_dir)
     for s in replace:
         content = regex(r'\b' + escape(s[0]) + r'\b').sub(s[1], content)
     cmp = []
     find = SimplestFindBy(lambda prefix, infix, y: y.endswith(r'.py'))
     for y in find(Str(abs_dir), ''):
         with abs_dir.joinpath(y).open(r'rt') as istream:
             cmp.append(istream.read() == content)
     expected = [True] * len(cmp), expected
     actual = cmp, actual
     self.assertEqual(expected, actual)
Example No. 15
    def __init__(self, cores=None, version=None, timeout=60):

        # Start the Comsol server as an external process.
        backend = discovery.backend(version)
        command = backend['server']
        logger.info('Starting external server process.')
        if cores:
            command += ['-np', str(cores)]
            noun = 'core' if cores == 1 else 'cores'
            logger.info(f'Server restricted to {cores} processor {noun}.')
        process = start(command, stdin=PIPE, stdout=PIPE)

        # Wait for it to report the port number.
        t0 = now()
        while process.poll() is None:
            line = process.stdout.readline().decode()
            match = regex(r'^.*listening on port *(\d+)', line)
            if match:
                port = int(match.group(1))
                break
            if now() - t0 > timeout:
                error = 'Server failed to start within time-out period.'
                logger.error(error)
                raise TimeoutError(error)
        logger.info(f'Server listening on port {port}.')

        # Save useful information in instance attributes.
        self.version = backend['name']
        self.cores = cores
        self.port = port
        self.process = process
Example No. 16
def get_cpu_clock(timing_rpt):
    '''
    Extract the processor core clock frequency from a Vivado timing report.
    Assumes that the design uses the GFE clock tree.

...
----------------------------- ...
| Clock Summary
| -------------
----------------------------- ...

Clock                         ...  Waveform(ns)           Period(ns)      Frequency(MHz)
-----                         ...  ------------           ----------      --------------
dbg_hub/inst/BSCANID.u_xsdbm_i...  {0.000 25.000}         50.000          20.000          
default_250mhz_clk1_clk_p     ...  {0.000 2.000}          4.000           250.000         
  mmcm_clkout0                ...  {0.000 1.667}          3.333           300.000         
    pll_clk[0]                ...  {0.000 0.208}          0.417           2400.000        
      pll_clk[0]_DIV          ...  {0.000 1.667}          3.333           300.000         
    pll_clk[1]                ...  {0.000 0.208}          0.417           2400.000        
      pll_clk[1]_DIV          ...  {0.000 1.667}          3.333           300.000         
    pll_clk[2]                ...  {0.000 0.208}          0.417           2400.000        
      pll_clk[2]_DIV          ...  {0.000 1.667}          3.333           300.000         
  mmcm_clkout1                ...  {0.000 20.000}         40.000          25.000          
...                                                                       ^^^^^^
                                                                          this number
    '''
    pattern = regex(r'^  mmcm_clkout1 .* (\d+\.\d+) +$')
    found = find(timing_rpt, pattern)
    if found:
        return {'cpu_Mhz': float(found[0])}
    else:
        raise NotFound('mmcm_clkout1 in {}'.format(timing_rpt))
Example No. 17
def search(a):
    x = []
    r = regex(args.search)
    for b in a:
        if r.search(b) != None:
            x.append(b)
    return x
Example No. 18
def main():
    "Detect all orphan files and entries"
    g = Graph()
    for manifest in [MANIFEST_PATH, MANIFEST_SYNTAX_PATH,
                     MANIFEST_TURTLE_LDPATCH_PATH,]:
        g.load(manifest, format="turtle")
        LOG.debug("loaded %s", manifest)
    for manifest in [MANIFEST_TURTLE_PATH,]:
        if exists(manifest):
            g.load(manifest, format="turtle")
            LOG.debug("loaded %s", manifest)

    # looking for entries that are not part of an mf:entry list
    for subj, _, _ in g.triples((None, MF.action, None)):
        if not list(g.triples((None, RDF.first, subj))):
            print subj

    # looking for files that are not referenced
    FILTER = regex(r'.*\.(ttl|nt|ldpatch)$')
    for foldername in (TESTSUITE_PATH, TURTLE_PATH):
        for filename in listdir(foldername):
            if not FILTER.match(filename):
                continue
            if filename.startswith("manifest"):
                continue
            iri = p2i(join(foldername, filename))
            if not list(g.triples((None, None, iri))):
                print iri

    # checking that all entries have the correct name
    for subj, _, obj in g.triples((None, MF.name, None)):
        if subj.rsplit("#",1)[1] != unicode(obj):
            print "WRONG NAME for ", subj
Example No. 19
def get_flag():
    if download(zip_url, zip_filename) != 200:
        print("failed to download file:{}\nurl = {}".format(zip_filename, zip_url))
        return None
    
    extract(zip_filename)
    string = decrypt(encrypted_name)
    return (regex(r"lasactf{(.*)}", string)[0])
Example No. 20
 def __init__(self, *args, **kwargs):
     super().__init__(*args, **kwargs)
     self.args.pattern = [
         i.value for p in self.args.pattern for i in indicators
         if fnmatch(i.name, p)
     ]
     self.pattern = '|'.join(F'(?:{p})' for p in self.args.pattern)
     self.log_debug(F'using pattern: {self.pattern}')
     self.pattern = regex(self.pattern.encode('ascii'))
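A standalone sketch of the same alternation-building step, using a made-up pattern list (the real patterns come from the `indicators` table, which is not shown here):

from re import compile as regex

patterns = [r'\d{1,3}(?:\.\d{1,3}){3}', r'[a-z]+\.example\.com']
combined = regex('|'.join(f'(?:{p})' for p in patterns).encode('ascii'))
print(combined.findall(b'hosts: 10.0.0.1, api.example.com'))
# [b'10.0.0.1', b'api.example.com']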
Example No. 21
def _ed(substs, abs_dir):
    from ..lib import RegexFileEditor, SimplestFindBy
    from re import compile as regex, escape
    olds = (y[0] for y in substs)
    news = tuple(y[1] for y in substs)
    oldpats = tuple(regex(r'\b' + escape(y) + r'\b') for y in olds)
    find = SimplestFindBy(lambda prefix, infix, y: y.endswith(r'.py'))
    for y in find(abs_dir, ''):
        RegexFileEditor(zip(oldpats, news))(abs_dir.joinpath(y))
Example No. 22
 def test(self):
     from ....lib import Path, RegexFileEditor
     from re import compile as regex
     edit = RegexFileEditor(((regex(r'\ba\b'), 'x'), (regex(r'\b0\b'), '9')))
     a, b = (Path(self.tmp).joinpath(x) for x in ('a', 'b'))
     with a.open(r'wt') as ostream:
         ostream.write(r'abc.a')
     with b.open(r'wt') as ostream:
         ostream.write(r'012.0')
     for x in a, b:
         edit(x)
     with a.open(r'rt') as istream:
         a = istream.read()
     with b.open(r'rt') as istream:
         b = istream.read()
     expected = r'abc.x', r'012.9'
     actual = a, b
     self.assertEqual(expected, actual)
Example No. 23
def get_flight_ids(html_text):
    __pq = PyQuery(html_text).find("tbody").find("td.inputselect").find(
        "div.content").find("input")
    ids = []
    for __node in __pq.items():
        if (__node.hasClass("radio-ajax")):
            __matched = regex("[A-Z]{2}.*[A-Z]", str(__node.attr("value")))
            ids.extend(__matched)
    return ids
Example No. 24
def get_filter():
    """a filter which takes out only filenames which probably contain media"""
    extensions = ['avi', 'mpg', 'mpeg', 'mp4', 'mkv', 'ogv',
                  'flv', 'ogg', 'mov', 'mp3', 'ac3', 'rm', 'ram',
                  'wmv', '3gp', 'aac', 'asf', 'h263', 'webm',
                  'm4a', '3g2', 'mj2']
    regexstring = r'\.('
    for extension in extensions:
        regexstring = regexstring + extension + '|'
    regexstring = regexstring[:-1] + ')$'
    return regex(regexstring).search
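For illustration, the loop above builds the single pattern compiled below; a short, self-contained check with made-up filenames:

from re import compile as regex

media_search = regex(r'\.(avi|mpg|mpeg|mp4|mkv|ogv|flv|ogg|mov|mp3|ac3|rm|ram|'
                     r'wmv|3gp|aac|asf|h263|webm|m4a|3g2|mj2)$').search
names = ['holiday.mp4', 'notes.txt', 'song.mp3', 'talk.MKV']
print([n for n in names if media_search(n)])
# ['holiday.mp4', 'song.mp3']  (matching is case-sensitive, so 'talk.MKV' is skipped)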
Example No. 25
def readConfig():
    global notificationTimeout, errorTimeout, socketTimeout, \
           checkInterval, sources

    currentSource = None
    file = open(getenv('HOME') + '/.config/catfriend', 'r')
    re = regex("^\s*(?:([a-zA-Z_]+)(?:\s+(\S+))?\s*)?(?:#.*)?$")

    checks = []
    for source in sources:
        checks.append(MailSource(source))

    while True:
        line = file.readline()
        if not line: break
        res = re.match(line)
        if not res:
            return line

        res = res.groups()
        if res[0] is None: continue

        if res[0] == "notificationTimeout":
            notificationTimeout = int(res[1])
        elif res[0] == "errorTimeout":
            errorTimeout = int(res[1])
        elif res[0] == "socketTimeout":
            socketTimeout = int(res[1])
        elif res[0] == "checkInterval":
            checkInterval = int(res[1])
        elif res[0] == "host" or res[0] == "imap":
            if currentSource:
                sources.append(currentSource)
            currentSource = MailSource(res[1])
        elif currentSource is None:
            return line
        elif not res[1]:
            if res[0] == "nossl":
                currentSource.noSsl = True
            else:
                return line
        elif res[0] == "id":
            currentSource.id = res[1]
        elif res[0] == "user":
            currentSource.user = res[1]
        elif res[0] == "password":
            currentSource.password = res[1]
        elif res[0] == "cert_file":
            # ignored
            currentSource.cert_file = res[1]
        else:
            return line

    sources.append(currentSource)
Example No. 26
def readConfig():
    global notificationTimeout, errorTimeout, socketTimeout, \
           checkInterval, sources

    currentSource = None
    file = open(getenv('HOME') + '/.config/catfriend', 'r')
    re = regex("^\s*(?:([a-zA-Z_]+)(?:\s+(\S+))?\s*)?(?:#.*)?$")

    checks = []
    for source in sources:
        checks.append(MailSource(source))

    while True:
        line = file.readline()
        if not line: break
        res = re.match(line)
        if not res:
            return line

        res = res.groups()
        if res[0] is None: continue

        if res[0] == "notificationTimeout":
            notificationTimeout = int(res[1])
        elif res[0] == "errorTimeout":
            errorTimeout = int(res[1])
        elif res[0] == "socketTimeout":
            socketTimeout = int(res[1])
        elif res[0] == "checkInterval":
            checkInterval = int(res[1])
        elif res[0] == "host" or res[0] == "imap":
            if currentSource:
                sources.append(currentSource)
            currentSource = MailSource(res[1])
        elif currentSource is None:
            return line
        elif not res[1]:
            if res[0] == "nossl":
                currentSource.noSsl = True
            else:
                return line
        elif res[0] == "id":
            currentSource.id = res[1]
        elif res[0] == "user":
            currentSource.user = res[1]
        elif res[0] == "password":
            currentSource.password = res[1]
        elif res[0] == "cert_file":
            # ignored
            currentSource.cert_file = res[1]
        else:
            return line

    sources.append(currentSource)
Example No. 27
 def exe_download_handler(download, spec):
     write2file(download, spec)
     f = base64.b64encode(open(spec['output'], 'rb').read())
     try:
         if not verify_sign(public_key_file, vars['exe.sig'], f):
             raise (Exception(
                 "INVALID SIGNATURE: downloaded {exe} failed verification".
                 format(exe=spec['output'])))
     except:
         eprint("WARNING: Unable to validate authenticity of {exe}".format(
             exe=spec['output']))
         proceedregex = regex(r'^[ ]*(y|yes|Y|YES|Yes)[ ]*$')
         cancelregex = regex(r'^[ ]*(n|no|N|NO|No)[ ]*$')
         while True:
             answer = input("Would you like to continue anyway (Y/n)?" +
                            ' ')
             if cancelregex.match(answer):
                 raise (KeyboardInterrupt())
             elif proceedregex.match(answer):
                 break
Example No. 28
    def __prepare_fields(self):

        from re import match as regex

        regstr = '(^`)([a-z]*)'

        for i in range(len(self.select_dict['fields'])):

            field, field_as = self.__ensure_field_as(self.select_dict['fields'][i])

            if not regex(regstr, self.select_dict['fields'][i]):
                self.select_dict['fields'][i] = '`a`.'+field+' AS '+field_as
Example No. 29
def get_release_information():
    rxstr = r"^\s*project\(([^\s]+)\s+version\s+(\d(\.\d)*)\s+description\s+.*\)$"
    project_re = regex(rxstr, IGNORECASE)
    with open(join(rootdir, "CMakeLists.txt")) as fh:
        lines = [
            project_re.match(X.strip()) for X in fh.readlines()
            if project_re.match(X.strip())
        ]
    if lines == []:
        print("Couldn't find versioning information.")
        sysexit(1)
    return (lines[0].group(1), lines[0].group(2))
Example No. 30
	def is_non_proposal_filibuster(self, message):
		""" Parses the message, determines if it is a filibustering non-proposal (D: or :D:)

			:param message: Message to parse.
			:type message: str
			:returns: True if the message is 'D:', ':D:', or 'nick: D:', etc.
		"""

		npf_matcher = regex(r'<[^>]+> (?:(\S+)[:,] )?(:)?D:')
		result = npf_matcher.match(message)

		return False if result == None else True
Example No. 31
def get_filter():
    """a filter which takes out only filenames which probably contain media"""
    extensions = [
        'avi', 'mpg', 'mpeg', 'mp4', 'mkv', 'ogv', 'flv', 'ogg', 'mov', 'mp3',
        'ac3', 'rm', 'ram', 'wmv', '3gp', 'aac', 'asf', 'h263', 'webm', 'm4a',
        '3g2', 'mj2'
    ]
    regexstring = r'\.('
    for extension in extensions:
        regexstring = regexstring + extension + '|'
    regexstring = regexstring[:-1] + ')$'
    return regex(regexstring).search
Example No. 32
    def viewdns(self):
        domain = self.target
        header = {"user-agent": self.agent}
        parameter = {"host": domain, "t": 1}
        formula = regex(r'<td>([\d\w-]+\.[\w.-]{2,6})</td>')
        requests = rikues.get("https://viewdns.info/reverseip/",
                              params=parameter,
                              headers=header)

        # finding domains
        domains = formula.findall(requests.text)
        return domains  # returns array
Example No. 33
	def is_ignored_message(self, message):
		""" Parses the message, determines if it should be ignored (does not reset votecount, is not a proposal)

			:param message: Message to parse.
			:type message: str
			:returns: True if the message should be ignored when legislating.
		"""

		jpq_matcher = regex(r'\*\*\* (Joins|Parts|Quits)')
		result = jpq_matcher.match(message)

		return False if result == None else True
Example No. 34
 def handle_data(self, data):
     if self.scan:
         self.scan = False
         if data.startswith("Hurry up"):
             return
         for pattern, msg_type in regexes.items():
             r = regex(pattern)
             m = r.match(data)
             if m:
                 self.data.append(msg_type(*m.groups()))
                 return
         self.data.append(UnknownMessage(data))
Example No. 35
def dynamic(page):
    kwargs = dict()
    charset = regex(r"^[a-zA-Z0-9_-]+$")

    if not charset.match(page):
        abort(Response("Page does not exist", 404))
        return

    try:
        return render_template("%s.html" % page, **kwargs)
    except TemplateNotFound:
        abort(Response("Page does not exist", 404))
Example No. 36
    def __init__(self, cores=None, version=None, timeout=60):

        # Start the Comsol server as an external process.
        backend = discovery.backend(version)
        server = backend['server']
        logger.info('Starting external server process.')
        if cores:
            arguments = ['-np', str(cores)]
            noun = 'core' if cores == 1 else 'cores'
            logger.info(f'Server restricted to {cores} processor {noun}.')
        else:
            arguments = []
        arguments += ['-login', 'auto']
        command = server + arguments
        if version_info < (3, 8):
            command[0] = str(command[0])
        process = start(command, stdin=PIPE, stdout=PIPE, errors='ignore')

        # Wait for the server to report the port number.
        t0 = now()
        lines = []
        port = None
        while process.poll() is None:
            line = process.stdout.readline().strip()
            if line:
                lines.append(line)
            match = regex(r'(?i)^Comsol.+?server.+?(\d+)$', line.strip())
            if match:
                port = int(match.group(1))
                break
            if now() - t0 > timeout:
                error = 'Server failed to start within time-out period.'
                logger.critical(error)
                raise TimeoutError(error)

        # Bail out if server exited with an error.
        # We don't use `process.returncode` here, as we would like to,
        # because on Linux the server executable exits with code 0,
        # indicating no error, even when an error has occurred.
        # We assume that the last line in the server's output is the
        # actual error message.
        if port is None:
            error = f'Starting server failed: {lines[-1]}'
            logger.critical(error)
            raise RuntimeError(error)
        logger.info(f'Server listening on port {port}.')

        # Save useful information in instance attributes.
        self.version = backend['name']
        self.cores = cores
        self.port = port
        self.process = process
Example No. 37
    def __init__(self, banned_words=BANNED_WORDS):
        from tools import AudioEffect, HiddenTextEffect, ExplicitTextEffect, PhonemicEffect, \
            VoiceEffect

        self.effects = {cls: [] for cls in
                        (AudioEffect, HiddenTextEffect, ExplicitTextEffect, PhonemicEffect, VoiceEffect)}
        self.connection_time = datetime.now()
        self.last_attack = datetime.now()  # any user has to wait some time before attacking, after entering the chan
        self.last_message = datetime.now()
        self.timestamps = list()
        self.has_been_warned = False # User has been warned he shouldn't flood
        self._banned_words = [regex(word) for word in banned_words]
        self.is_shadowbanned = False # User has been shadowbanned
Example No. 38
def remove_pair(fields, *pair_regex_with_content):
    rg = regex(r"|".join(pair_regex_with_content))
    for k, v in fields.items():
        new_v = ""
        prefix_pos = 0
        for anchor in rg.finditer(v):
            new_v += v[prefix_pos:anchor.start()]
            for meta_name, matched_value in anchor.groupdict().items():
                if matched_value and "content" in meta_name:
                    new_v += matched_value
                    prefix_pos = anchor.end()
        if prefix_pos > 0:
            fields[k] = new_v + v[prefix_pos:]
    return fields
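A hypothetical call, assuming `remove_pair` above is in scope; the group name must contain "content" for the inner text to be kept:

fields = {'body': 'keep <b>this</b> and <b>that</b> too'}
print(remove_pair(fields, r'<b>(?P<content1>.*?)</b>'))
# {'body': 'keep this and that too'}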
Example No. 39
def find_keys(dict_dir, regex_pattern, string, checkall=False):
    _matching = []
    with open(dict_dir, "rb") as _dict:
        while True:
            line = _dict.readline().strip()
            if not line:
                return _matching
            line = line.decode("utf-8")
            text = xor_string(line, string)
            lst = regex(regex_pattern, text)
            if lst:
                _matching.append(line)
                if not checkall:
                    return _matching
Example No. 40
def check_prereqs():
    print("Checking script environment...")
    compatible_os = regex(r'Windows')
    if not compatible_os.match(get_os()):
        eprint(
            "FAILED: This script only works on Windows with cygwin installed.")
        exit(-1)
    else:
        try:
            # Check cygwin's bash.exe exist
            subprocess.check_call([
                'powershell.exe',
                'try {{ if (-not ([System.IO.File]::Exists("{cygwin_path}"))) {{ throw "Error" }} }} catch {{ }};'
                .format(cygwin_path=p.join(dir_cygwin, 'bin', "bash.exe"))
            ],
                                  shell=False)
            # Check running as administrator (only Administrator rights can see the temp dir)
            ls(p.join(ossep, 'Windows', 'temp'))

        except subprocess.CalledProcessError:
            eprint("MISSING PREREQ: cygwin not found @ {}/".format(dir_cygwin))
            exit(-2)
        except PermissionError:
            eprint(
                '\n' +
                "FAILED: Script needs to be run with administrator privileges."
            )
            eprint(
                "PowerShell w/ Admin Privalages command: \n\t Start-Process powershell -Verb RunAs"
            )
            exit(-3)

        # Check cygwin's package manager exists
        pkgmgnr = p.join(dir_cygwin, 'cyg-get', "cyg-pkg-mgnr.exe")
        if not p.isfile(pkgmgnr):
            eprint("MISSING PREREQ: cyg-pkg-mgnr.exe not found @ {}".format(
                p.join(dir_cygwin, 'cyg-get') + ossep))
            if p.isfile(
                    p.join(
                        p.dirname(pkgmgnr),
                        'setup-x86{}.exe'.format("_64" if is_64bit else ""))):
                # HELPING: original download file found, not renamed.
                eprint(
                    "  Please rename setup-x86{}.exe to cyg-pkg-mgnr.exe and try again.\n"
                    .format("_64" if is_64bit else ""))
            exit(-4)

    # VALID Environment
    print("PASS")
Example No. 41
def parse_comment(inp):
    stack = 0
    offset = 0
    pattern = regex(r'(\/\*)|(\*\/)')
    while True:
        match = pattern.search(inp, offset)
        if match is None:
            raise ParseException("Hit end of file while parsing comment")
        if match.group(1):
            stack += 1
        else:
            stack -= 1
        offset = match.end()  # advance past the delimiter just matched
        if stack == 0: break
    return "T_MCOMMENT", offset
Example No. 42
 def __init__(self, e, v = None):
     if isinstance(e, Exception) and v is None:
         self._t = e.__class__
         self._match = lambda s: s.startswith(str(e))
     elif isinstance(e, type) and issubclass(e, Exception) and \
          ((v is None) or isinstance(v, str)):
         self._t = e
         if v is None:
             self._match = lambda s: True
         else:
             v = regex("^(?:{0:s})$".format(v))
             self._match = lambda s: v.match(s) is not None
     else:
         raise Exception("usage: with expected(Exception[, \"regex\"]): "
                         "or with expected(Exception(\"text\")):")
Example No. 43
File: hn.py Project: Xion/hncli
    def _retrieve_user_info(self, page="/"):
        """ Gets HN user info from given page.
        A page is either a URL or a BeautifulSoup object.
        Returns True or False, depending on whether user info
        could be found.
        """
        if isinstance(page, basestring):
            page = self._fetch_page(page)

        top_table = page.find("table").find("table")
        user_td = top_table.find_all("td")[-1]
        user_span = user_td.find("span", {"class": "pagetop"})
        user_link = user_span.find("a", href=regex(r"user\?id\=.+"))
        if not user_link:
            return False

        name = user_link.text
        points = regex(r"\((\d+)\)").search(user_span.text).group(1)
        if not (name or points):
            return False

        self.user_name = name
        self.user_points = points
        return True
Example No. 44
 def __init__(self, e, v=None):
     if isinstance(e, Exception) and v is None:
         self._t = e.__class__
         self._match = lambda s: s.startswith(str(e))
     elif isinstance(e, type) and issubclass(e, Exception) and \
          ((v is None) or isinstance(v, str)):
         self._t = e
         if v is None:
             self._match = lambda s: True
         else:
             v = regex("^(?:{0:s})$".format(v))
             self._match = lambda s: v.match(s) is not None
     else:
         raise Exception("usage: with expected(Exception[, \"regex\"]): "
                         "or with expected(Exception(\"text\")):")
def configure_path():
    print("Configuring cygwin $PATH variable...")
    unix_precedence = [
        '/usr/local/bin', '/usr/bin', '/bin', '/usr/sbin', '/sbin'
    ]

    path_assignment = 'PATH="' + ':'.join(unix_precedence) + ':${PATH}"'
    lines2write = ['# Set default path variable', path_assignment, '']

    line2match = regex(r'^# Set PATH so it includes user\'s private bin')
    prev_complete_match = regex(r'^' + lines2write[0])

    bashprofilefile = p.join(dir_cygwin, 'home', getuser(), '.bash_profile')
    tmpfile = p.join(dir_cygwin, 'home', getuser(), '.~$.bash_profile')

    def insertPathConfig(line, newfile, logdata):
        if 'found' not in logdata:
            logdata['found'] = False

        if not logdata['found'] and line2match.search(line):
            for textentry in lines2write:
                newfile.write(textentry + '\n')
            newfile.write(line)
            logdata['found'] = True
        elif len(line) == 0 and not logdata['found']:
            raise (Exception('ERROR: EOF before match was found in file.'))
        elif prev_complete_match.search(line):
            raise (StopIteration())
        else:
            newfile.write(line)

    try:
        __modifyfile(bashprofilefile, tmpfile, insertPathConfig)
    except Exception as err:
        eprint("Failed to modify {}".format(bashprofilefile))
        raise (err)
Example No. 46
	def __init__(self, line):
		if regex(r"\n$", line):
			line = line[:-1]
		
		parts = line.split()

		self.operator = ""
		self.label = ""
		self.location = -1
		self.location_name = ""

		if len(parts) == 1:
			self.operator = parts[0]
		elif len(parts) == 2:
			if parts[0] in operators:
				self.operator = parts[0]

				if is_location(parts[1]):
					self.location = int(parts[1])
				else:
					self.location_name = parts[1]
			else:
				self.operator = parts[1]
				
				if self.operator == "DAT":
					self.location_name = parts[0]
					valid_location_names.append(self.location_name)
				else:
					self.label = parts[0]
		elif len(parts) == 3:
			self.operator = parts[1]
			
			if self.operator == "DAT":
				self.location_name = parts[0]
				memory[self.location_name] = int(parts[2])
				
				valid_location_names.append(self.location_name)
			else:
				self.label = parts[0]
				
				if is_location(parts[2]):
					self.location = int(parts[2])
				else:
					self.location_name = parts[2]
Example No. 47
def _prefix_stem_suffix(from_, to, names):
    from re import compile as regex, escape
    sfrom = _Str(from_)
    from_ = sfrom.split('%')
    if len(from_) != 2:
        raise ValueError(from_)
    sto = _Str(to)
    to2 = sto.split('%')
    len_to = len(to2)
    if 2 < len_to:
        raise ValueError(to)
    rx = regex(escape(from_[0]) + r'(.+)' + escape(from_[1]))
    if len_to == 1:
        for _ in names:
            yield '', to, ''
        return
    to_pos, to_lineno = to.loc
    prefix = to.create(to2[0], to_pos, to_lineno)
    for e in names:
        stem = rx.match(_Str(e)).groups(1)[0]
        suffix = to.create(to2[1], to_pos + len(stem), to_lineno)
        stem = to.create(stem, to_pos + sto.index('%'), to_lineno)
        yield prefix, stem, suffix
Example No. 48
def load_graph():
    """Load manifests and reports into a single graph, and return it"""

    g = Graph()
    g.bind("earl", EARL[""])
    g.bind("dc", DC[""])
    g.bind("mf", MF[""])
    g.bind("doap", "http://usefulinc.com/ns/doap#")
    g.bind("foaf", "http://xmlns.com/foaf/0.1/")

    for manifest_filename, manifest_iri in ALL_MANIFESTS_PATH_BASE:
        g.load(manifest_filename, format="turtle", publicID=manifest_iri)
        LOG.debug("loaded %s", manifest_filename)

    FILTER = regex(r'.*\.ttl$')
    for filename in listdir(REPORTS_PATH):
        if not FILTER.match(filename):
            continue
        report_path = join(REPORTS_PATH, filename)
        g.load(report_path, format="turtle",
               publicID=TESTSUITE_NS["reports/" + filename])
        LOG.debug("loaded %s", report_path)

    return g
Example No. 49
"""Interact with online albums."""

from pygur.htget import Meta
from pygur.image import Image
from re import compile as regex
from requests import get


VALID_TAG = regex(r'^\w+$')
FROM_LINK = regex(r'imgur\.com/a/(\w+)')
NON_WORD = regex(r'[^\w\s\-\_]')
SHRINK = regex(r'[\s-]+')
HTML = 'https://imgur.com/a/%s/layout/blog'


class ANSI:
    @staticmethod
    def up(n=1):
        print('\033[%sA' % n, end='')

    @staticmethod
    def erase_line():
        print('\033[K', end='')

    @staticmethod
    def clear_up(n: int):
        for _ in range(n):
            ANSI.up()
            ANSI.erase_line()
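An illustration of how the module-level patterns above might be used; the link and title are invented, and the slug step is only a guess at how pygur combines NON_WORD and SHRINK:

from re import compile as regex

FROM_LINK = regex(r'imgur\.com/a/(\w+)')
NON_WORD = regex(r'[^\w\s\-\_]')
SHRINK = regex(r'[\s-]+')

album_id = FROM_LINK.search('https://imgur.com/a/AbC123/layout/blog').group(1)
slug = SHRINK.sub('_', NON_WORD.sub('', 'My Album: Photos!').strip())
print(album_id, slug)  # AbC123 My_Album_Photos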

Example No. 50
 def __init__(self, reference):
     self.reference = regex(reference)
Example No. 51
    def __init__(self, import_name, **kwargs):
        self.cssFiles = []
        self.jsFiles = []
        self.regex = {
            'color': regex('^#(([0-9a-fA-F]{3})|([0-9a-fA-F]{6}))$'),
            'date': regex('^20[0-9]{2}-[0-9]{2}-[0-9]{2}$'),
            'hex16': regex('^([0-9a-fA-F])*$'),
            'sha256': regex('^([0-9a-fA-F]){64}$')
        }
        self.dhGen = int(2)
        self.dhGroup = {
            'modp1': 'ffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024e088'
                + 'a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd3a431b302b0'
                + 'a6df25f14374fe1356d6d51c245e485b576625e7ec6f44c42e9a63a3620f'
                + 'fffffffffffffff',
            'modp2': 'ffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024e088'
                + 'a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd3a431b302b0'
                + 'a6df25f14374fe1356d6d51c245e485b576625e7ec6f44c42e9a637ed6b0'
                + 'bff5cb6f406b7edee386bfb5a899fa5ae9f24117c4b1fe649286651ece65'
                + '381ffffffffffffffff',
            'modp5': 'ffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024e088'
                + 'a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd3a431b302b0'
                + 'a6df25f14374fe1356d6d51c245e485b576625e7ec6f44c42e9a637ed6b0'
                + 'bff5cb6f406b7edee386bfb5a899fa5ae9f24117c4b1fe649286651ece45'
                + 'b3dc2007cb8a163bf0598da48361c55d39a69163fa8fd24cf5f83655d23d'
                + 'ca3ad961c62f356208552bb9ed529077096966d670c354e4abc9804f1746'
                + 'c08ca237327ffffffffffffffff',
            'modp14': 'ffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024e08'
                + '8a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd3a431b302b'
                + '0a6df25f14374fe1356d6d51c245e485b576625e7ec6f44c42e9a637ed6b'
                + '0bff5cb6f406b7edee386bfb5a899fa5ae9f24117c4b1fe649286651ece4'
                + '5b3dc2007cb8a163bf0598da48361c55d39a69163fa8fd24cf5f83655d23'
                + 'dca3ad961c62f356208552bb9ed529077096966d670c354e4abc9804f174'
                + '6c08ca18217c32905e462e36ce3be39e772c180e86039b2783a2ec07a28f'
                + 'b5c55df06f4c52c9de2bcbf6955817183995497cea956ae515d2261898fa'
                + '051015728e5a8aacaa68ffffffffffffffff',
            'modp15': 'ffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024e08'
                + '8a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd3a431b302b'
                + '0a6df25f14374fe1356d6d51c245e485b576625e7ec6f44c42e9a637ed6b'
                + '0bff5cb6f406b7edee386bfb5a899fa5ae9f24117c4b1fe649286651ece4'
                + '5b3dc2007cb8a163bf0598da48361c55d39a69163fa8fd24cf5f83655d23'
                + 'dca3ad961c62f356208552bb9ed529077096966d670c354e4abc9804f174'
                + '6c08ca18217c32905e462e36ce3be39e772c180e86039b2783a2ec07a28f'
                + 'b5c55df06f4c52c9de2bcbf6955817183995497cea956ae515d2261898fa'
                + '051015728e5a8aaac42dad33170d04507a33a85521abdf1cba64ecfb8504'
                + '58dbef0a8aea71575d060c7db3970f85a6e1e4c7abf5ae8cdb0933d71e8c'
                + '94e04a25619dcee3d2261ad2ee6bf12ffa06d98a0864d87602733ec86a64'
                + '521f2b18177b200cbbe117577a615d6c770988c0bad946e208e24fa074e5'
                + 'ab3143db5bfce0fd108e4b82d120a93ad2caffffffffffffffff',
            'modp16': 'ffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024e08'
                + '8a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd3a431b302b'
                + '0a6df25f14374fe1356d6d51c245e485b576625e7ec6f44c42e9a637ed6b'
                + '0bff5cb6f406b7edee386bfb5a899fa5ae9f24117c4b1fe649286651ece4'
                + '5b3dc2007cb8a163bf0598da48361c55d39a69163fa8fd24cf5f83655d23'
                + 'dca3ad961c62f356208552bb9ed529077096966d670c354e4abc9804f174'
                + '6c08ca18217c32905e462e36ce3be39e772c180e86039b2783a2ec07a28f'
                + 'b5c55df06f4c52c9de2bcbf6955817183995497cea956ae515d2261898fa'
                + '051015728e5a8aaac42dad33170d04507a33a85521abdf1cba64ecfb8504'
                + '58dbef0a8aea71575d060c7db3970f85a6e1e4c7abf5ae8cdb0933d71e8c'
                + '94e04a25619dcee3d2261ad2ee6bf12ffa06d98a0864d87602733ec86a64'
                + '521f2b18177b200cbbe117577a615d6c770988c0bad946e208e24fa074e5'
                + 'ab3143db5bfce0fd108e4b82d120a92108011a723c12a787e6d788719a10'
                + 'bdba5b2699c327186af4e23c1a946834b6150bda2583e9ca2ad44ce8dbbb'
                + 'c2db04de8ef92e8efc141fbecaa6287c59474e6bc05d99b2964fa090c3a2'
                + '233ba186515be7ed1f612970cee2d7afb81bdd762170481cd0069127d5b0'
                + '5aa993b4ea988d8fddc186ffb7dc90a6c08f4df435c934063199ffffffff'
                + 'ffffffff',
            'modp17': 'ffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024e08'
                + '8a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd3a431b302b'
                + '0a6df25f14374fe1356d6d51c245e485b576625e7ec6f44c42e9a637ed6b'
                + '0bff5cb6f406b7edee386bfb5a899fa5ae9f24117c4b1fe649286651ece4'
                + '5b3dc2007cb8a163bf0598da48361c55d39a69163fa8fd24cf5f83655d23'
                + 'dca3ad961c62f356208552bb9ed529077096966d670c354e4abc9804f174'
                + '6c08ca18217c32905e462e36ce3be39e772c180e86039b2783a2ec07a28f'
                + 'b5c55df06f4c52c9de2bcbf6955817183995497cea956ae515d2261898fa'
                + '051015728e5a8aaac42dad33170d04507a33a85521abdf1cba64ecfb8504'
                + '58dbef0a8aea71575d060c7db3970f85a6e1e4c7abf5ae8cdb0933d71e8c'
                + '94e04a25619dcee3d2261ad2ee6bf12ffa06d98a0864d87602733ec86a64'
                + '521f2b18177b200cbbe117577a615d6c770988c0bad946e208e24fa074e5'
                + 'ab3143db5bfce0fd108e4b82d120a92108011a723c12a787e6d788719a10'
                + 'bdba5b2699c327186af4e23c1a946834b6150bda2583e9ca2ad44ce8dbbb'
                + 'c2db04de8ef92e8efc141fbecaa6287c59474e6bc05d99b2964fa090c3a2'
                + '233ba186515be7ed1f612970cee2d7afb81bdd762170481cd0069127d5b0'
                + '5aa993b4ea988d8fddc186ffb7dc90a6c08f4df435c93402849236c3fab4'
                + 'd27c7026c1d4dcb2602646dec9751e763dba37bdf8ff9406ad9e530ee5db'
                + '382f413001aeb06a53ed9027d831179727b0865a8918da3edbebcf9b14ed'
                + '44ce6cbaced4bb1bdb7f1447e6cc254b332051512bd7af426fb8f401378c'
                + 'd2bf5983ca01c64b92ecf032ea15d1721d03f482d7ce6e74fef6d55e702f'
                + '46980c82b5a84031900b1c9e59e7c97fbec7e8f323a97a7e36cc88be0f1d'
                + '45b7ff585ac54bd407b22b4154aacc8f6d7ebf48e1d814cc5ed20f8037e0'
                + 'a79715eef29be32806a1d58bb7c5da76f550aa3d8a1fbff0eb19ccb1a313'
                + 'd55cda56c9ec2ef29632387fe8d76e3c0468043e8f663f4860ee12bf2d5b'
                + '0b7474d6e694f91e6dcc4024ffffffffffffffff',
            'modp18': 'ffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024e08'
                + '8a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd3a431b302b'
                + '0a6df25f14374fe1356d6d51c245e485b576625e7ec6f44c42e9a637ed6b'
                + '0bff5cb6f406b7edee386bfb5a899fa5ae9f24117c4b1fe649286651ece4'
                + '5b3dc2007cb8a163bf0598da48361c55d39a69163fa8fd24cf5f83655d23'
                + 'dca3ad961c62f356208552bb9ed529077096966d670c354e4abc9804f174'
                + '6c08ca18217c32905e462e36ce3be39e772c180e86039b2783a2ec07a28f'
                + 'b5c55df06f4c52c9de2bcbf6955817183995497cea956ae515d2261898fa'
                + '051015728e5a8aaac42dad33170d04507a33a85521abdf1cba64ecfb8504'
                + '58dbef0a8aea71575d060c7db3970f85a6e1e4c7abf5ae8cdb0933d71e8c'
                + '94e04a25619dcee3d2261ad2ee6bf12ffa06d98a0864d87602733ec86a64'
                + '521f2b18177b200cbbe117577a615d6c770988c0bad946e208e24fa074e5'
                + 'ab3143db5bfce0fd108e4b82d120a92108011a723c12a787e6d788719a10'
                + 'bdba5b2699c327186af4e23c1a946834b6150bda2583e9ca2ad44ce8dbbb'
                + 'c2db04de8ef92e8efc141fbecaa6287c59474e6bc05d99b2964fa090c3a2'
                + '233ba186515be7ed1f612970cee2d7afb81bdd762170481cd0069127d5b0'
                + '5aa993b4ea988d8fddc186ffb7dc90a6c08f4df435c93402849236c3fab4'
                + 'd27c7026c1d4dcb2602646dec9751e763dba37bdf8ff9406ad9e530ee5db'
                + '382f413001aeb06a53ed9027d831179727b0865a8918da3edbebcf9b14ed'
                + '44ce6cbaced4bb1bdb7f1447e6cc254b332051512bd7af426fb8f401378c'
                + 'd2bf5983ca01c64b92ecf032ea15d1721d03f482d7ce6e74fef6d55e702f'
                + '46980c82b5a84031900b1c9e59e7c97fbec7e8f323a97a7e36cc88be0f1d'
                + '45b7ff585ac54bd407b22b4154aacc8f6d7ebf48e1d814cc5ed20f8037e0'
                + 'a79715eef29be32806a1d58bb7c5da76f550aa3d8a1fbff0eb19ccb1a313'
                + 'd55cda56c9ec2ef29632387fe8d76e3c0468043e8f663f4860ee12bf2d5b'
                + '0b7474d6e694f91e6dbe115974a3926f12fee5e438777cb6a932df8cd8be'
                + 'c4d073b931ba3bc832b68d9dd300741fa7bf8afc47ed2576f6936ba42466'
                + '3aab639c5ae4f5683423b4742bf1c978238f16cbe39d652de3fdb8befc84'
                + '8ad922222e04a4037c0713eb57a81a23f0c73473fc646cea306b4bcbc886'
                + '2f8385ddfa9d4b7fa2c087e879683303ed5bdd3a062b3cf5b3a278a66d2a'
                + '13f83f44f82ddf310ee074ab6a364597e899a0255dc164f31cc50846851d'
                + 'f9ab48195ded7ea1b1d510bd7ee74d73faf36bc31ecfa268359046f4eb87'
                + '9f924009438b481c6cd7889a002ed5ee382bc9190da6fc026e479558e447'
                + '5677e9aa9e3050e2765694dfc81f56e880b96e7160c980dd98edd3dfffff'
                + 'ffffffffffff'
        }
        self.dhPrime = int(self.dhGroup['modp14'], 16)

        for dirpath, dirnames, filenames in walk('templates/css'):
            for filename in [f for f in filenames if f.endswith('.css')]:
                self.cssFiles.append(path.join(dirpath, filename))

        for dirpath, dirnames, filenames in walk('templates/js'):
            for filename in [f for f in filenames if f.endswith('.js')]:
                self.jsFiles.append(path.join(dirpath, filename))

        self.cssFiles.sort()
        self.jsFiles.sort()
        super(Application, self).__init__(import_name, **kwargs)
Example No. 52
from urllib.parse import urlencode
from urllib.request import urlopen, Request
from re import compile as regex, IGNORECASE

snapshot_re = regex(r'(http://archive.is/\w+)', IGNORECASE)

# http://mysamaritanbug.samity.org/show_bug.cgi?id=
number = 13889
while True:
    data = urlencode({'url': 'http://novorossia.su/ru/node/%s' % number}).encode('ascii')
    with urlopen(Request('http://archive.is/submit/', method='POST', data=data,
                         headers={'Content-Type': 'application/x-www-form-urlencoded'})) as r:
        print('%s\t%s' % (number, snapshot_re.search(r.read().decode('utf-8')).group(1)))
    number -= 1
    if number <= 0:
        break
Example No. 53
    def compose(self, p, **x):
        """Produce the equivalent simple language as matched.

        Might have different whitespace and be otherwise formatted
        in a normalized fashion.

        """
        return compose(self[0])

    def to_simple(self):
        """Generate corresponding simple object that can be evaluated."""
        return self[0].to_simple()


Number.grammar = regex(r"(\+|\-)?[0-9]+(\.[0-9]+)?")
Boolean.grammar = regex(r"(true|false)")
Variable.grammar = Symbol

term_expression = [Number, Boolean, Variable]
multiplicative_expression = [Multiply, Divide, term_expression]

Multiply.grammar = term_expression, "*", multiplicative_expression
Divide.grammar = term_expression, "/", multiplicative_expression

additive_expression = [Add, Subtract, multiplicative_expression]

Add.grammar = multiplicative_expression, "+", additive_expression
Subtract.grammar = multiplicative_expression, "-", additive_expression
Expression.grammar = additive_expression
Example No. 54
from json_tools import prepare, field_by_path, list_field_paths
from re import compile as regex
from multiprocessing import Pool
from os import walk, makedirs, remove
from os.path import join, normpath
from json import load, dump, loads
from parser_settings import files_of_interest
from utils import get_answer
from bisect import insort_left
from special_cases import specialSections

root_dir = "./assets"
prefix = "./translations"
texts_prefix = "texts"
sub_file = normpath(join(prefix, "substitutions.json"))

glitchEmoteExtractor = regex("^([In]{,3}\s?[A-Za-z]+\.)\s+(.*)")
glitchIsHere = regex("^.*[gG]litch.*")


def defaultHandler(val, filename, path):
  sec = ""
  for pattern in specialSections:
    if pattern.match(filename, path):
      sec = pattern.name
      break
  return [(sec, val, filename, path)]
  
def glitchDescriptionSpecialHandler(val, filename, path):
  ## Handles glitch utterances, and separates it to emote part and text part,
  ## then saves the parts in new paths to database
  ## See details in textHandlers description
Example No. 55
def is_location(loc):
	if regex(r"^\d\d$", loc):
		return True
	else:
		return False
Example No. 56
	:synopsis: Handles all of eunomia's legislation/vote tracking features.

.. moduleauthor:: Nicholas De Nova <*****@*****.**>

"""

import logging
import eunomialog

from backlog import BacklogItem

from enum import Enum

from re import compile as regex

vote_matcher = regex(r'<[^>]+> (?:(?P<nick>\S+)[:,] )?:D(?:(?P<carots>\^+)|~(?P<ints>\d+)|~(?P<expr>.+))?$')

class Legislation:
	""" Class that contains all legislation related functions.

		:param fhandler: Logging handler for output to a file.
		:type fhandler: logging.FileHandler
		:param shandler: Logging handler for output to a stream.
		:type shandler: logging.StreamHandler
	"""

	def __init__(self, fhandler, shandler, channel):
		self.logger = logging.getLogger("Legislation")
		self.logger.setLevel(logging.INFO)

		self.logger.addHandler(fhandler)
Example No. 57
#!/usr/bin/env python3

#
# This script is used to generate the JSON versions of the chapters,
# needed for the Programmation Web Client Riche course
#

from html.parser import HTMLParser
from json import dump
from re import compile as regex

NEWLINE = regex("\n *")

class MyParser(HTMLParser):

    nb = 1
    current = None
    data = ""

    def handle_starttag(self, tag, _):
        if tag == "section":
            self.current = { "txt": "", "links": [] }
        self.data = ""

    def handle_data(self, data):
        self.data += data

    def handle_endtag(self, tag):
        if tag == "p":
            self.current["txt"] = NEWLINE.sub(" ", self.data)
        elif tag == "li":
Example No. 58
  ".namesource": ["^sourceNames/[0-9]+$"],
  ".particle": [],
  ".damage": [],
  ".statuseffect": [],
  ".stagehand": ["^radioMessages/[^/]+/(0|2)$"],
  ".material": [],
  ".matmod": [],
  ".npctype": ["^scriptConfig/crew/role/(name|field)$",
    "^scriptConfig/crew/ranks/[0-9]+$"],
  ".mat": [],
  ".radiomessages": [],
  ".bush": [],
  ".grass": [],
  ".monstertype": ["^(.+/)?dialog/.+$"],
  ".monsterskill": ["^label$"],
  ".aimission": [".*Text$"],
  ".questtemplate": ["^.+Text(/[^0-9]+([0-9]+/1)?/[0-9]+)?$",
    "^scriptConfig/(descriptions|.+Note|parcel(Name|Description))/.+$",
      "^.+/example/name$", "^scriptConfig/objectiveLists/[^/]+/[0-9]+/0$"],
  ".tooltip": [],
  ".itemdescription": [],
  "_metadata":[]

  }
files_of_interest = dict()
for ext, poi in foi.items():
  files_of_interest[ext] = list()
  for p in poi:
    #print(p)
    files_of_interest[ext].append(regex(p))
Example No. 59
 def _setup_validator(self):
     self._validator = regex(r'^(%s)$' % '|'.join(self._elements +
                                                  self._additional))
Example No. 60
class Signal(_SignalDescriptor):
	'''Descriptor which makes it easy to create signals in Python.

	Usage:
	class X(QtCore.QObject):
		valueChanged = Signal('int')
		def setValue(self, newValue):
			...
			valueChanged.emit(newValue)
		...
	x = X()
	x.valueChanged.connect(slot)
	'''
	_native = False

_reSignature = regex(r'(\w+)\s*\(\s*((?:[\w:]+(?:\s*,\s*[\w:]+)*)?)\s*\)')

def connect(obj, signature, slot, connType = QtCore.Qt.AutoCompatConnection):
	'''Connects a Qt native signal to a slot.
	'''
	match = _reSignature.match(signature)
	argTypes = match.group(2)
	if argTypes:
		numSignalArgs = argTypes.count(',') + 1
	else:
		numSignalArgs = 0
	_SignalWrapper(obj, signature, numSignalArgs).connect(slot, connType)

# This is a possible implementation, but it is untested
#def disconnect(obj, signature, slot):
#	'''Disconnects a Qt native signal to a slot.