Example #1
def parse_text(context, text, expression):
    parser = parse.compile(expression)
    res = parser.parse(text)

    # Make an implicit assumption that there might be something before the expression
    if res is None:
        expr = "{}" + expression
        parser = parse.compile(expr)
        res = parser.parse(text)

    # Make an implicit assumption that there might be something after the expression
    if res is None:
        expr = expression + "{}"
        parser = parse.compile(expr)
        res = parser.parse(text)

    # Make an implicit assumption that there might be something before/after the expression
    if res is None:
        expr = "{}" + expression + "{}"
        parser = parse.compile(expr)
        res = parser.parse(text)

    assert res, u"expression not found"
    assert res.named, u"expression not found"
    for key, val in res.named.items():
        context.persona[key] = val
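A minimal sketch of exercising this step helper outside of behave, assuming `import parse` is available at module level and using a `SimpleNamespace` stand-in (not part of the original) for the behave context:
import parse
from types import SimpleNamespace

context = SimpleNamespace(persona={})   # stand-in for a behave context carrying a persona dict
parse_text(context, "Your code is 1234", "code is {code:d}")
print(context.persona)                  # expected: {'code': 1234}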
Example #2
def process_logs(lines):
    p = compile("[{date:ti}] {event}")
    g = compile("Guard #{guard_id:d} begins shift")
    lines.sort()
    current_guard = None
    guard_records = []

    for line in lines:
        print(line)
        data = p.parse(line)
        if (data is not None):
            event_str = data['event']
            event_date = data['date']
            if (event_str.endswith("begins shift")):
                # If this is a guard event, make a new guard record
                if (current_guard is not None):
                    current_guard.end_shift()

                guard_data = g.parse(event_str)
                current_guard = GuardRecord(guard_data['guard_id'], event_date)
                guard_records.append(current_guard)

            if (event_str.startswith("wakes up")):
                current_guard.wakeup(event_date.minute)

            if (event_str.startswith("falls asleep")):
                current_guard.go_to_sleep(event_date.minute)

    return guard_records
Example #3
 def __init__(self, bucket, key_prefix='', region=None, transient=False, read_only=True,
              infrequent_access_kb_threshold=None, sampling_calculator=None):
     """
     :param bucket: Cassette s3 storage bucket
     :type bucket: str
     :param key_prefix: Optional key prefix for recordings
     :type key_prefix: str
     :param region: Optional aws region
     :type region: str
     :param transient: Whether this is a transient cassette; all recordings under the given prefix
     will be deleted when the cassette is closed (only if not read only)
     :type transient: bool
     :param read_only: If True, this cassette can only be used to fetch recordings and not to create new ones;
     any write operation will raise an assertion.
     :type read_only: bool
     :param infrequent_access_kb_threshold: Threshold in KB above which objects will be saved in the STANDARD_IA
     (infrequent access) storage class; None means never (default)
     :type infrequent_access_kb_threshold: float
     :param sampling_calculator: Optional sampling ratio calculator function; before saving a recording this
     function is called with (category, recording_size, recording) and should return a number
     between 0 and 1 specifying the sampling rate
     :type sampling_calculator: function
     """
     _logger.info(u'Creating S3TapeCassette using bucket {}'.format(bucket))
     self.bucket = bucket
     self.key_prefix = (key_prefix + '/') if key_prefix else ''
     self.transient = transient
     self.read_only = read_only
     self.infrequent_access_threshold = \
         infrequent_access_kb_threshold * 1024 if infrequent_access_kb_threshold else None
     self.sampling_calculator = sampling_calculator
     self._random = Random(110613)
     self._metadata_key_parser = compile(self.METADATA_KEY)
     self._recording_id_parser = compile(self.RECORDING_ID)
     self._s3_facade = S3BasicFacade(self.bucket, region=region)
Example #4
def parseThreadsLog(threads_log):
    # [0.019s][info][os,thread] Thread is alive (tid: 23727, pthread id: 139650126874368).
    pattern = compile(
        "[{time}][{level}][{loggers}] Thread {type} (tid: {tid:d}, pthread id: {pthid:d})."
    )
    time_pattern = compile("{seconds:d}.{millis:d}s")
    threads = {}

    with open(threads_log) as f:
        for line in f:
            parsed = pattern.parse(line)
            if parsed:
                tid = parsed.named['tid']

                if tid not in threads:
                    threads[tid] = []
                events = threads[tid]

                parsed_time = time_pattern.parse(parsed.named['time'])
                time = parsed_time.named['seconds'] * 1000 + parsed_time.named[
                    'millis']

                events.append({
                    'time': time,
                    'type': parsed.named['type'],
                    'tid': parsed.named['tid']
                })

    return threads
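The sample line in the comment above can be used to sanity-check the two patterns on their own; a small sketch, assuming `from parse import compile` as in the function above:
from parse import compile

pattern = compile(
    "[{time}][{level}][{loggers}] Thread {type} (tid: {tid:d}, pthread id: {pthid:d})."
)
time_pattern = compile("{seconds:d}.{millis:d}s")

line = "[0.019s][info][os,thread] Thread is alive (tid: 23727, pthread id: 139650126874368)."
parsed = pattern.parse(line)
t = time_pattern.parse(parsed.named['time'])
print(parsed.named['type'], parsed.named['tid'])      # is alive 23727
print(t.named['seconds'] * 1000 + t.named['millis'])  # 19 (milliseconds, as computed above)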
Example #5
    def _compile_formats_from_patterns(self, field_patterns):
        """Return a list/dict of patterns compiled from the
        str/list/dict of passed field_patterns.
        """
        if isinstance(field_patterns, str):
            return [
                parse.compile(format=field_patterns,
                              extra_types=extra_format_types)
            ]
        elif isinstance(field_patterns, list):
            return [
                parse.compile(format=p, extra_types=extra_format_types)
                for p in field_patterns
            ]
        elif isinstance(field_patterns, dict):
            compiled_field_patterns = {}
            for message_type, message_pattern in field_patterns.items():
                compiled_patterns = self._compile_formats_from_patterns(
                    message_pattern)
                compiled_field_patterns[message_type] = compiled_patterns
            return compiled_field_patterns

        else:
            raise ValueError(
                'Passed field_patterns must be str, list or dict. Found %s: %s'
                % (type(field_patterns), str(field_patterns)))
Example #6
    def __init__(self):
        self.MONTHS = [
            "january", "febraury", "march", "april", "may", "june", "july",
            "august", "september", "october", "november", "dicember"
        ]

        year = "{year:^ty}"
        month = "{month:^td}"
        month_verbose = "{monthv:^tdv}"
        day = "{day:^td}"
        delim = ["-", "/"]
        price = "{price:^g}"
        cond = "{cond:^cs}"
        format_date = [year + d + month + d + day for d in delim] + \
                      [day + d + month + d + year for d in delim] + \
                      [month + d + year for d in delim] + \
                      [month_verbose]
        extra_types_date = dict(ty=self.parse_year,
                                td=self.parse_day_month,
                                tdv=self.parse_verbose)
        extra_types = dict(cs=self.parse_condition)
        self.format_date = [(compile(x, extra_types=extra_types_date), x)
                            for x in format_date]
        self.format_price = compile(price)
        self.format_cond = compile(cond, extra_types=extra_types)
Example #7
def parse_text(context, text, expression):
    import parse
    parser = parse.compile(expression)
    res = parser.parse(text)

    # Make an implicit assumption that there might be something before the expression
    if res is None:
        expr = '{}' + expression
        parser = parse.compile(expr)
        res = parser.parse(text)

    # Make an implicit assumption that there might be something after the expression
    if res is None:
        expr = expression + '{}'
        parser = parse.compile(expr)
        res = parser.parse(text)

    # Make an implicit assumption that there might be something before/after the expression
    if res is None:
        expr = '{}' + expression + '{}'
        parser = parse.compile(expr)
        res = parser.parse(text)

    assert res, u'expression not found'
    assert res.named, u'expression not found'
    for key, val in res.named.items():
        context.persona[key] = val
Example #8
def parse_header_field(line):
    """
    Parses a line of the CryoSat-2 L1B ascii header and
    returns tag, value and unit as strings.

    Note: double quotes in string items are removed

    e.g.

    NUM_DSD=+0000000044
    -> "num_dsd", "+0000000044", None

    DSD_SIZE=+0000000280<bytes>
    -> "dsd_size", "+0000000280", "bytes"
    """
    tag, value, unit = None, None, None
    parser = parse.compile("{tag}={value}")
    match = parser.parse(line)
    if match:
        tag = match["tag"].lower()
        value = match["value"]
        value = value.replace("\"", "").strip()
        # check if unit is given
        unit_parser = parse.compile("{value}<{unit}>")
        unit_match = unit_parser.parse(value)
        if unit_match:
            value = unit_match["value"]
            unit = unit_match["unit"]
    return tag, value, unit
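A quick check of the two cases from the docstring, assuming `import parse` at module level as used above:
print(parse_header_field("NUM_DSD=+0000000044"))
# ('num_dsd', '+0000000044', None)
print(parse_header_field("DSD_SIZE=+0000000280<bytes>"))
# ('dsd_size', '+0000000280', 'bytes')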
Example #9
def readccbbm_using_parse(filename):

    fid = open(filename, 'rt')
    lines = fid.readlines()
    regex = r'[-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?'

    coords = []

    # Skip all lines until vertex
    ctr = 0
    parser1 = parse.compile(
        "Vertex {:d} {:g} {:g} {:g} {Jfeature=({:g} {:g} {:g} {:g} {:g} {:g} {:g})}"
    )
    parser2 = parse.compile("Vertex {:d} {:g} {:g} {:g}")

    while lines[ctr][0:6] != 'Vertex':
        ctr += 1

    # First read the vertices
    line = lines[ctr]
    line_split = line.split()
    ctr += 1
    # ctr = 1;
    attributes = []
    while line_split[0] == 'Vertex':
        result = parser1.parse(line)
        if result is not None:
            (idx, vtx1, vtx2, vtx3, radial_dist, mTBM1, mTBM2, mTBM3,
             detJacobian, eig1Jacobian, eig2Jacobian) = result.fixed
            attributes.append(radial_dist)
        else:
            result = parser2.parse(line)
            if result is not None:
                (idx, vtx1, vtx2, vtx3) = result.fixed
            else:
                sys.stdout.write('Cannot parse the line ' + line)

        coords.append([vtx1, vtx2, vtx3])
        line = lines[ctr]
        line_split = line.split()
        ctr += 1

    coords = np.array(coords)
    # The rest of the lines are faces
    faces = []
    ctr -= 1
    for ii in range(ctr, len(lines)):
        line = lines[ii]
        result = parse.search("Face {:d} {:d} {:d} {:d}", line)
        (idx, face1, face2, face3) = result.fixed
        faces.append([face1, face2, face3])

    faces = np.array(faces)
    if faces.min() == 1:
        faces -= 1

    isMultilevelUCF = False
    return coords, faces, attributes, isMultilevelUCF
Example #10
 def __init__(self, pattern, directive):
     self.pattern = pattern
     self.types = {}
     self.expand()
     #print("Pattern:", pattern, self._expanded)
     self._p = [compile(expansion.pattern) for expansion in self._expanded]
     if len(self._p) == 0:
         self._p = [compile(pattern)]
     self.directive = directive
Example #11
def user_app_list(path=Path('/Applications')):
    """获取用户应用列表
    
    :param path: 用户应用的系统绝对路径

    :return applist: 应用列表。用于提取实体后,查询路径,进行启动、关闭等操作
    :return app_kw_dict 应用关键词词典。用于提取实体
    """
    applist = []
    app_kw_dict = {}

    # iterate over the Applications directory
    for app in path.iterdir():
        appname = app.name.replace('.app', '')
        appinfo = {
            "name": appname,
            "tag": [],
            "path": f"/Applications/{app.name}"
        }
        appinfo['tag'].append(appname)

        tag_list = [appname]

        # walk the matching files under each app bundle and save the CFBundleDisplayName value as a tag
        for file in app.rglob('InfoPlist.strings'):
            if file.is_file():
                # print(file)
                try:
                    with open(file, 'rb') as f:
                        content = f.read().decode('utf-16').splitlines()
                        for r in content:
                            p = compile('"CFBundleDisplayName" = "{name}";')
                            result = p.parse(r)
                            if result:
                                tag = result["name"]
                                appinfo["tag"].append(tag)
                                tag_list.append(tag)
                            else:
                                p = compile('CFBundleDisplayName = "{name}";')
                                result = p.parse(r)
                                if result:
                                    tag = result["name"]
                                    appinfo["tag"].append(tag)
                                    tag_list.append(tag)
                        appinfo["tag"] = list(set(appinfo["tag"]))  # 去重
                        tag_list = list(set(tag_list))
                except Exception as ex:
                    # print(f"出错文件为{file}")
                    continue

        applist.append(appinfo)
        app_kw_dict[appname] = tag_list

    return applist, app_kw_dict
Example #12
def parse_input(lines):
    parse_point = parse.compile('{:d},{:d}')
    parse_fold = parse.compile('fold along {}={:d}')

    points, folds = [], []
    for line in lines:
        if ',' in line:
            points.append(parse_point.parse(line).fixed)
        elif '=' in line:
            folds.append(parse_fold.parse(line).fixed)

    return points, folds
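A hedged usage sketch with made-up input lines matching the two formats (the coordinates and folds below are illustrative, not from the real puzzle input):
import parse  # assumed to be imported in the original module

sample = ["6,10", "0,14", "", "fold along y=7", "fold along x=5"]
points, folds = parse_input(sample)
print(points)  # [(6, 10), (0, 14)]
print(folds)   # [('y', 7), ('x', 5)]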
Example #13
def part1():
    records = read_input()
    records.sort()

    record_format = compile("[{}-{}-{} {}:{}] {}")
    action_format = compile("Guard #{} begins shift")

    # print(records)
    guard_on_shift = None
    guard_fell_asleep = None

    for record in records:
        year, month, day, hour, minute, action = record_format.parse(record)

        if action[0] == 'G':  # new guard begins shift
            # previous guard stayed awake until the end of their shift.
            # new guard is counted as "awake" before they begin shift

            guard_on_shift = action_format.parse(action)[0]
            guard_fell_asleep = None
            print("Guard {} begins shift at {}:{}".format(
                guard_on_shift, hour, minute))

        elif action == 'wakes up':  # current guard awaking from sleep
            # increment all minutes the guard was asleep for
            print("Guard {} wakes up at {}:{}".format(guard_on_shift, hour,
                                                      minute))
            for i in range(int(guard_fell_asleep), int(minute)):
                print("  Guard {} was sleeping during minute {}".format(
                    guard_on_shift, i))
                add_guard_asleep(guard_on_shift, i)

        elif action == 'falls asleep':  # current guard falling asleep
            print("Guard {} falls asleep at {}:{}".format(
                guard_on_shift, hour, minute))
            guard_fell_asleep = minute

    guard_total_asleep = {k: total_minutes(v) for k, v in guard_asleep.items()}
    print(guard_total_asleep)

    sleepiest_guard = max(guard_total_asleep, key=guard_total_asleep.get)

    print("Sleepiest guard is ID {} ({} minutes)".format(
        sleepiest_guard, guard_total_asleep[sleepiest_guard]))
    sleepy_guard_minutes = guard_asleep[sleepiest_guard]
    sleepiest_minute = max(sleepy_guard_minutes, key=sleepy_guard_minutes.get)

    print("Most often asleep at minute {}".format(sleepiest_minute))

    print("Product:", int(sleepiest_guard) * int(sleepiest_minute))

    part2()
Example #14
class OpcodeTestCase:
    def __init__(self, initial, command, result):
        """Create an opcode test which shows the original state of the 4 registers
        as an array, the command as an array and the result.
        """
        self.initial = initial
        self.command = command
        self.result = result

    BEFORE_MATCHER = compile("Before: [{0}, {1}, {2}, {3}]")
    COMMAND_MATCHER = compile("{0} {1} {2} {3}")
    RESULT_MATCHER = compile("After:  [{0}, {1}, {2}, {3}]")

    def get_possible_opcodes(self):
        executor = OpcodeExecutor()
        args = self.command[1:]
        possibles = []
        for cmd in OpcodeExecutor.COMMANDS:
            output = executor.executeCommand(
                cmd, args, list(self.initial))
            if (output == self.result):
                possibles.append(cmd)
        return possibles

    @staticmethod
    def parse(beforeLine, commandLine, resultLine):
        """Parse an opcode test in the form:
                Before: [1, 0, 2, 0]
                4 1 0 1
                After:  [1, 1, 2, 0]
        This will result in initial = [1, 0, 2, 0], command = [4 1 0 1], result = [1, 1, 2, 0]
        """
        beforeData = OpcodeTestCase.BEFORE_MATCHER.parse(beforeLine.strip())
        commandData = OpcodeTestCase.COMMAND_MATCHER.parse(commandLine.strip())
        resultData = OpcodeTestCase.RESULT_MATCHER.parse(resultLine.strip())

        # print "Before: ", beforeData
        # print "Command:", commandData
        # print "Result: ", resultData

        if (beforeData and commandData and resultData):
            before = list(map(int, [beforeData[0], beforeData[1],
                                    beforeData[2], beforeData[3]]))
            command = list(map(int, [commandData[0], commandData[1],
                                     commandData[2], commandData[3]]))
            result = list(map(int, [resultData[0], resultData[1],
                                    resultData[2], resultData[3]]))
            return OpcodeTestCase(before, command, result)

        raise ValueError("Invalid input: " + beforeLine +
                         " " + commandLine + " " + resultLine)
Example #15
class PlantRule:
    PARSER = compile("{0} => {1}")

    def __init__(self, pattern, end_state):
        self.pattern = pattern
        self.end_state = end_state

    def get_changes(self, state):
        """Return an array of tuples where the first item is the real index and the second item is the end state

        For example, [(0, "#"), (6, "#")] means the changes are change 0 to "#" and 6 to "#"
        """
        changes = []
        index = state.find(self.pattern)
        while index != -1:
            changes.append((index + 2, self.end_state))
            index = state.find(self.pattern, index + 1)

        return changes

    def __repr__(self):
        return self.pattern + " => " + self.end_state

    @staticmethod
    def parse(input_str):
        data = PlantRule.PARSER.parse(input_str)
        if data:
            return PlantRule(data[0], data[1])
        return None
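A small sketch of the rule parser, assuming `from parse import compile` is in scope when the class body runs (the rule string below is an illustrative example in the `pattern => end_state` form):
rule = PlantRule.parse("..#.. => #")
print(rule)                         # ..#.. => #
print(rule.get_changes("#..#..#"))  # [(3, '#')]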
Example #16
def main():
    if len(sys.argv) != 2:
        print('DLL path missing.')
        return

    dll_path = sys.argv[1]
    print('DLL path:', dll_path)

    export_name_pattern = parse.compile('\t Name : {0}')

    mangled_symbols = []
    try:
        dependencies = plumbum.local['Dependencies.exe']
        dll_exports = dependencies('-exports', dll_path).splitlines()
        for line in dll_exports:
            parse_result = export_name_pattern.parse(line)
            if parse_result:
                mangled_symbols.append(parse_result[0])
    except plumbum.CommandNotFound:
        print('"Dependencies.exe" not found.\n' \
              'Please download it from https://github.com/lucasg/Dependencies.\n' \
              'After installation, update your PATH environment.')
        return

    for symbol in mangled_symbols:
        try:
            print(symbol, '->',
                  cppmangle.cdecl_sym(cppmangle.demangle(symbol)))
        except:
            print(symbol)
Example #17
def update_data(molecule, line, pattern=parse.compile("{key:S}={value:S}")):
    """
    Update the data stored in molecule with what can be parsed from a string

    Parameters
    ----------
    molecule : pybel.Molecule
        The molecule to be updated
    line : str
        The string to be parsed
    pattern : parse.Parser, optional
        A parse.Parser instance to parse line

    Examples
    --------
    Currently only charge and spin are parsed:

    >>> mol = pybel.readstring("smi", "[Cu]")
    >>> mol.charge, mol.spin
    (0, 1)
    >>> update_data(mol, "charge=+1 spin=3")
    >>> mol.charge, mol.spin
    (1, 3)
    """
    for result in pattern.findall(line):
        if result["key"] == "charge":
            molecule.OBMol.SetTotalCharge(int(result["value"]))
        elif result["key"] == "spin":
            molecule.OBMol.SetTotalSpinMultiplicity(int(result["value"]))
Example #18
    def map_sentences(self) -> list:
        result = subprocess.run(
            [sys.executable, VECALIGN_PROGRAM,
             '--alignment_max_size', str(self.alignment_max_size),
             '--src', self.src_file_path,
             '--tgt', self.tgt_file_path,
             '--src_embed', self.src_overlap_file_path, self.src_emb_file_path,
             '--tgt_embed', self.tgt_overlap_file_path, self.tgt_emb_file_path
             ],
            capture_output=True,
            text=True,
            timeout=60
            )
        # print('\n\nRESULT:\n',result.stdout,'\n\nEND\n\n')
        print(result.stderr)

        # preprocess vecalign output: convert str to structural data
        mappings = []
        mapping_strs = result.stdout.split('\n')

        def parse_idx(ids_str: str):
            return [int(id_str) for id_str in ids_str.split(',')]

        def parse_cost(cost_str: str):
            return float(cost_str)

        parser = compile(VECALIGN_STDOUT_FORMAT)
        for mapping_str in mapping_strs:
            strs = parser.parse(mapping_str)
            if strs is not None:
                src_idx = parse_idx(strs[0])
                tgt_idx = parse_idx(strs[1])
                cost = parse_cost(strs[2])
                mappings.append((cost, src_idx, tgt_idx))
        return mappings
Example #19
class Record(object):
    time_format = "[%Y-%m-%d %H:%M]"
    guard_format = parse.compile("Guard #{id} begins shift")

    def __init__(self, record_str):
        r_date = record_str[:record_str.index("]") + 1]
        r_event = record_str[record_str.index("]") + 2:]

        self.date = datetime.strptime(r_date, Record.time_format)
        self.event = r_event

    def __str__(self):
        return datetime.strftime(self.date, Record.time_format) + " " + self.event

    def get_guard_id(self):
        gid = None
        if "Guard" in self.event:
            r = Record.guard_format.parse(self.event)
            gid = int(r["id"])

        return gid

    def get_event_type(self):
        if "Guard" in self.event or "wakes up" in self.event:
            return RecordType.WAKEUP
        if "falls asleep" in self.event:
            return RecordType.ASLEEP

    def get_time(self):
        corrected_time = self.date
        # round to midnight next day if needed
        if corrected_time.hour == 23:
            corrected_time = corrected_time.replace(hour = 0, minute = 0)
            corrected_time += timedelta(days=1)
        return corrected_time
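A hedged sketch of how a single record string would flow through this class, assuming the `datetime`/`timedelta` imports, `import parse`, and the `RecordType` enum from the original module (the record string itself is an illustrative example):
r = Record("[1518-11-01 00:00] Guard #10 begins shift")
print(r.get_guard_id())  # 10
print(r.get_time())      # 1518-11-01 00:00:00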
Example #20
    def match(cls, sentence, steps):
        """
            Tries to find a match from the given sentence with the given steps

            :param string sentence: the step sentence to match
            :param dict steps: the available registered steps

            :returns: the arguments and the func which were matched
            :rtype: tuple
        """
        for regex, func in steps.items():
            if isinstance(regex, ArgumentExpression):
                try:
                    compiled = parse.compile(regex.regex, ArgExpRegistry().expressions)
                except ValueError as e:
                    raise StepArgumentRegexError(regex.regex, func.__name__, e)

                match = compiled.search(sentence)
                if match:
                    return match.fixed, match.named, func
            else:
                match = re.search(regex, sentence)
                if match:
                    return match.groups(), match.groupdict(), func

        return None, None, None
Example #21
    def parseHeaders(self):
        headerParser = parse.compile("{name} : < {totalBytes} > {tail}")
        while True:
            l = self.file.readline()
            if not l:
                break

            if l.startswith('#') or l.startswith('\n'):
                continue

            parseResult = headerParser.parse(l)
            if parseResult is None:
                raise IOError('Failed to parse string {0}'.format(l))

            dataBlockDescriptor = PhastaRawFileReader.DataBlockDescriptor()
            dataBlockDescriptor.posInFile = self.file.tell()
            dataBlockDescriptor.totalBytes = int(
                parseResult.named['totalBytes']) - 1
            dataBlockDescriptor.headerElements = [
                int(x) for x in parseResult.named['tail'].split()
            ]

            self.blockDescriptors[
                parseResult.named['name'].strip()] = dataBlockDescriptor
            self.file.seek(dataBlockDescriptor.totalBytes + 1, 1)
Example #22
def part1():
    claims = read_input()

    claim_format = compile("#{} @ {},{}: {}x{}")

    squares = {}

    for claim in claims:
        id, claim_x, claim_y, width, height = claim_format.parse(claim)

        for x in range(int(claim_x), int(claim_x) + int(width)):
            for y in range(int(claim_y), int(claim_y) + int(height)):
                square = (x, y)
                if square in squares:
                    squares[square] += 1
                else:
                    squares[square] = 1

    dupe_squares = {k: v for k, v in squares.items() if v > 1}
    print("There are {} squares in two or more claims".format(
        len(dupe_squares)))

    for claim in claims:
        id, claim_x, claim_y, width, height = claim_format.parse(claim)

        has_overlap = False

        for x in range(int(claim_x), int(claim_x) + int(width)):
            for y in range(int(claim_y), int(claim_y) + int(height)):
                if (x, y) in dupe_squares:
                    has_overlap = True
                    break

        if not has_overlap:
            print("Claim {} does not overlap other claims".format(id))
Example #23
class Hook(object):
    hook_pattern = parse.compile(
        "{define}({address},{name},{size}){tail}", case_sensitive=True)
    
    def __init__(self, address: str, name: str, size: str):
        self.address = address.strip()
        self.name = name.strip()
        self.size = size.strip()
        self.annotated = False

        if not is_hex(self.address) or not is_hex(self.size):
            log("hook parse error: " + self.GetInjString() + '\n')

    @staticmethod
    def IsHook(line: str):
        if line.find("DEFINE_HOOK") >= 0:
            if line.startswith("DEFINE_HOOK") == False:
                log("source parse error: " + line)
                return False
            hook_profile = Hook.hook_pattern.parse(line)
            if hook_profile == None:
                log("source parse error: " + line)
            return hook_profile != None
        return False

    def __eq__(self, o) -> bool:
        if o is None:
            return False
        return self.address == o.address and self.name == o.name and self.size == o.size

    def __ne__(self, o) -> bool:
        return not self == o

    def GetInjString(self):
        return self.address + " = " + self.name + ", " + self.size
Example #24
def process_input(file_name: str) -> list[tuple[str, int]]:
    with (pathlib.Path(__file__).parent / file_name).open() as input_file:
        pattern = parse.compile("{}{:d}")
        output = [
            pattern.parse(line).fixed for line in input_file if line.strip()
        ]
    return output
Example #25
    def validate_meta(self):
        ''' Iteratively validate the meta csv file to ensure the ESC50 records are accurate '''

        assert len(self.meta) > 0, 'No meta files were found'
        assert len(self.audio_files) <= len(
            self.meta), 'More audio than meta records were found'
        assert len(self.meta) <= len(
            self.audio_files), 'More meta records than audio were found'

        file_pattern = parse.compile('{fold}-{source}-{take}-{target}.wav')
        for row in self.meta:
            attributes = file_pattern.parse(row["filename"])

            if attributes is None:
                print(
                    f'{row["filename"]} not matching pattern <fold>-<source>-<take>-<target>.wav'
                )
            else:
                attribute_keys = ['fold', 'target', 'take']

                errors = []
                for key in attribute_keys:
                    if attributes.named[key] != row[key]:
                        errors.append(
                            f'{key} expected {row[key]} got {attributes.named[key]}'
                        )

                if len(errors) > 0:
                    print(f'{row["filename"]} incorrect. {", ".join(errors)}')
Example #26
def main():
    content = []
    parserExp = parse.compile("{date} price {symbol} {value:g} {fiat_symbol}")
    if os.path.exists(pricedb_path):
        with open(pricedb_path, "r") as file:
            for line in file:
                res = parserExp.parse(line)
                dt = datetime.datetime.strptime(res['date'], "%Y-%m-%d")
                info = {
                    "date": dt,
                    "symbol": res['symbol'],
                    "price":  str(res['value']),
                    "fiat_symbol": res['fiat_symbol']
                }
                content.append(info)
    db_content = []
    startDate = datetime.datetime.strptime(start_at, "%Y/%m/%d")
    delta = datetime.datetime.now() - startDate
    for cur in currencies:
        cur_list = filter(lambda c: c['symbol'] == cur, content)
        for i in range(delta.days + 1):
            dt = startDate + datetime.timedelta(days=i)
            res = list(filter(lambda date: date['date'].date() == dt.date(), cur_list))
            if not res:
                res = queryPrice(dt, cur)
            else:
                res = res[0]
                content.remove(res)
            db_content.append(res)
    for element in content:
        db_content.append(element)  # push remaining elements to the final list
    db_content = sorted(db_content, key=lambda k: (k['date'], k['symbol']))
    writePriceDatabase(db_content)
Example #27
def parse_cryosat_l1b_filename(filename):
    """
    Returns the information in the CryoSat-2 l1b filename
    """
    # Strip path and file extension
    filename = Path(filename).stem
    # Construct the parser
    parser_str = "CS_{proc_stage}_"
    parser_str += "{instrument}_"
    parser_str += "{radar_mode}_"
    parser_str += "{data_level}_"
    parser_str += "{start_dt}_"
    parser_str += "{stop_dt}_"
    parser_str += "{baseline}"
    parser = parse.compile(parser_str)
    # Parse the filename
    result = parser.parse(filename)
    # Do some post-editing
    # - parse is not that smart when it comes to work with date strings
    # - naming conventions as the rest of pysiral
    info = {}
    info["mission"] = "cryosat2"
    info["instrument"] = result["instrument"].lower()
    info["radar_mode"] = result["radar_mode"].lower()
    info["data_level"] = "L"+result["data_level"]
    info["start_dt"] = dtparser.parse(result["start_dt"])
    info["stop_dt"] = dtparser.parse(result["stop_dt"])
    info["baseline"] = result["baseline"]
    return AttrDict(info)
Example #28
class Stars(object):
    _format = parse.compile("position=<{},{}> velocity=<{},{}>")

    def __init__(self, star_data_str):
        r = Stars._format.parse(star_data_str)
        self._x = int(r[0])
        self._y = int(r[1])
        self._spe_x = int(r[2])
        self._spe_y = int(r[3])

    def move(self):
        self._x += self._spe_x
        self._y += self._spe_y

    def has_position(self, x, y):
        return self._x == x and self._y == y

    def __str__(self):
        return "({},{})".format(self._x, self._y)

    def distance(self, star2):
        return abs(star2._x - self._x) + abs(star2._y - self._y)

    def __eq__(self, star2):
        return star2._x == self._x and star2._y == self._y
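A minimal sketch of the star format, assuming `import parse` as above (the position/velocity values are illustrative):
s = Stars("position=< 9,  1> velocity=< 0,  2>")
s.move()
print(s)  # (9,3)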
Example #29
File: core.py Project: l6a/hallmark
def ParaFrame(fmt, *args, debug=False, **kwargs):
    pattern = fmt

    for i in range(len(fmt) // 3):
        if debug:
            print(i, pattern, args, kwargs)
        try:
            pattern = pattern.format(*args, **kwargs)
            break
        except KeyError as e:
            k = e.args[0]
            pattern = re.sub(r'\{' + k + r':?.*?\}', '{' + k + ':s}', pattern)
            kwargs[e.args[0]] = '*'

    files = sorted(glob(pattern))
    if debug:
        print(f'Pattern: "{pattern}"')
        n = len(files)
        if n > 1:
            print(f'{n} matches, e.g., "{files[0]}"')
        elif n > 0:
            print(f'{n} match, i.e., "{files[0]}"')
        else:
            print(f'No match; please check format string')

    parser = parse.compile(fmt)

    l = []
    for f in files:
        r = parser.parse(f)
        if r is None:
            print(f'Failed to parse "{f}"')
        else:
            l.append({'path': f, **r.named})
    return pd.DataFrame(l)
Example #30
    def match(sentence, steps):
        """
            Tries to find a match from the given sentence with the given steps

            :param string sentence: the step sentence to match
            :param dict steps: the available registered steps

            :returns: the arguments and the func which were matched
            :rtype: tuple
        """
        for pattern, func in steps.items():
            if isinstance(pattern, re._pattern_type):  # pylint: disable=protected-access
                match = pattern.search(sentence)
                if match:
                    return StepMatch(args=match.groups(),
                                     kwargs=match.groupdict(),
                                     func=func)
            else:
                try:
                    compiled = parse.compile(pattern,
                                             ArgExpRegistry().expressions)
                except ValueError as e:
                    raise StepPatternError(pattern, func.__name__, e)

                match = compiled.search(sentence)
                if match:
                    return StepMatch(args=match.fixed,
                                     kwargs=match.named,
                                     func=func)

        return None
Example #31
 def parse_data(self):
     horizontal = []
     vertical = []
     diagonal = []
     parser = parse.compile(r'{:d},{:d} -> {:d},{:d}')
     for x1, y1, x2, y2 in parser.findall(self.data):
         if x1 == x2:
             # x is constant, i.e. a vertical segment (the horizontal/vertical list names are
             # swapped here, which is harmless since both lists are concatenated on return)
             start, end = sorted([y1, y2])
             l = [(x1, y) for y in range(start, end + 1)]
             horizontal.extend(l)
         elif y1 == y2:
             # y is constant, i.e. a horizontal segment
             start, end = sorted([x1, x2])
             l = [(x, y1) for x in range(start, end + 1)]
             vertical.extend(l)
         elif x1 < x2:
             x_range = range(x1, x2 + 1)
             y_range = range(y1, y2 +
                             1) if y1 < y2 else range(y1, y2 - 1, -1)
             l = [(x, y) for x, y in zip(x_range, y_range)]
             diagonal.extend(l)
         else:
             x_range = range(x1, x2 - 1, -1)
             y_range = range(y1, y2 +
                             1) if y1 < y2 else range(y1, y2 - 1, -1)
             l = [(x, y) for x, y in zip(x_range, y_range)]
             diagonal.extend(l)
     return horizontal + vertical, diagonal
Example #32
    def match(sentence, steps):
        """
            Tries to find a match from the given sentence with the given steps

            :param string sentence: the step sentence to match
            :param dict steps: the available registered steps

            :returns: the arguments and the func which were matched
            :rtype: tuple
        """
        for pattern, func in steps.items():
            if isinstance(pattern, re._pattern_type):  # pylint: disable=protected-access
                match = pattern.search(sentence)
                if match:
                    return StepMatch(args=match.groups(), kwargs=match.groupdict(), func=func)
            else:
                try:
                    compiled = parse.compile(pattern, ArgExpRegistry().expressions)
                except ValueError as e:
                    raise StepPatternError(pattern, func.__name__, e)

                match = compiled.search(sentence)
                if match:
                    return StepMatch(args=match.fixed, kwargs=match.named, func=func)

        return None
Example #33
    def id_outliers_fn(outlier_report, threshold, dwi_file):
        """Get list of scans that exceed threshold for number of outliers

        Parameters
        ----------
        outlier_report: string
            Path to the fsl_eddy outlier report

        threshold: int or float
            If threshold is an int, it is treated as number of allowed outlier
            slices. If threshold is a float between 0 and 1 (exclusive), it is
            treated the fraction of allowed outlier slices before we drop the
            whole volume.

        dwi_file: string
            Path to nii dwi file to determine total number of slices

        Returns
        -------
        drop_scans: numpy.ndarray
            List of scan indices to drop
        """
        import nibabel as nib
        import numpy as np
        import os.path as op
        import parse

        with open(op.abspath(outlier_report), 'r') as fp:
            lines = fp.readlines()

        p = parse.compile(
            "Slice {slice:d} in scan {scan:d} is an outlier with "
            "mean {mean_sd:f} standard deviations off, and mean "
            "squared {mean_sq_sd:f} standard deviations off.")

        outliers = [p.parse(l).named for l in lines]
        scans = {d['scan'] for d in outliers}

        def num_outliers(scan, outliers):
            return len([d for d in outliers if d['scan'] == scan])

        if 0 < threshold < 1:
            img = nib.load(dwi_file)
            try:
                threshold *= img.header.get_n_slices()
            except nib.spatialimages.HeaderDataError:
                print(
                    'WARNING. We are not sure which dimension has the '
                    'slices in this image. So we are using the 3rd dim.',
                    img.shape)
                threshold *= img.shape[2]

        drop_scans = np.array(
            [s for s in scans if num_outliers(s, outliers) > threshold])

        outpath = op.abspath("dropped_scans.txt")
        np.savetxt(outpath, drop_scans, fmt="%d")

        return drop_scans, outpath
Example #34
def sentence_mapping(sentence, threshold=0.0):
    """
    Maps a sentence and returns the original and the mapped.

    @param sentence: The sentence to map.
    @return: The original sentence and the mapped sentence.
    """

    found = False
    options = []
    original = None
    translation = None

    # first look for general blocks
    for elem in GENERAL:
        if elem[0][:3] == sentence.replace('    ', '').replace('(', '')[:3]:
            options.append(elem)
            found = True

    # then look for robotics blocks
    if not found:
        for elem in ROBOTICS:
            if elem[0][:3] == sentence.replace('    ', '').replace('(',
                                                                   '')[:3]:
                options.append(elem)
                found = True

    if found:
        # select the option that better fits
        l = [(m[0], m[1], similar(sentence, m[0])) for m in options]
        original, translation, score = max(l, key=lambda item: item[2])

        if score < threshold:
            return None, None

        # clean sentence
        s = sentence.replace('    ', '').replace('(', '').replace(')', '')

        # extract arguments
        p = compile(original)
        args = p.parse(s)

        if args:
            args_aux = list(args)

            # look for more blocks
            for idx in range(len(args_aux)):
                new_ori, new_trans = sentence_mapping(args_aux[idx], 0.6)

                if new_trans is not None:
                    # print "args: ",idx, args_aux[idx]
                    # print "trans: ",new_trans
                    args_aux[idx] = new_trans

            # print "trans: ",translation
            # print "args: ",args_aux
            translation = translation.format(l=args_aux)

    return original, translation
Example #35
 def test_too_many_fields(self):
     # Python 3.5 removed the limit of 100 named groups in a regular expression,
     # so only test for the exception if the limit exists.
     try:
         re.compile("".join("(?P<n{n}>{n}-)".format(n=i) for i in range(101)))
     except AssertionError:
         p = parse.compile("{:ti}" * 15)
         self.assertRaises(parse.TooManyFields, p.parse, "")
Example #36
 def __init__(self, spec):
     spec = grok_re_preprocess(spec)
     if is_named_re(spec):
         self.re = regex.compile(spec)
         self.parse = None
     else:
         self.re = None
         self.parse = parse.compile(spec)
Example #37
 def __init__(self, spec):
     spec, pattern_types = grok_re_preprocess(spec)
     self.type_collection = TypeCollection(pattern_types)
     if is_named_re(spec):
         self.re = regex.compile(spec)
         self.parse = None
     else:
         self.re = None
         self.parse = parse.compile(spec)
Example #38
def parse_sms_set_var(context, expression):
    assert context.persona is not None, u'no persona is setup'
    url = urlparse(context.browser.url).path
    parser = parse.compile(expression)
    res = parser.parse(url)
    assert res, u'expression not found'
    assert res.named, u'expression not found'
    for key, val in res.named.items():
        context.persona[key] = val
Example #39
def parse_sms_set_var(context, tel, expression):
    assert context.persona is not None, u'no persona is setup'
    msgs = context.sms.user_messages(tel)
    assert msgs, u'no sms received'

    parser = parse.compile(expression)
    res = parser.parse(msgs[-1])

    # Make an implicit assumption that there might be something before/after the expression
    if res is None:
        expression = '{}' + expression + '{}'
        parser = parse.compile(expression)
        res = parser.parse(msgs[-1])

    assert res, u'expression not found'
    assert res.named, u'expression not found'
    for key, val in res.named.items():
        context.persona[key] = val
Example #40
def get_compilestats(prog_out):
    """ Get the LLVM compilation stats from :prog_out:. """
    from parse import compile

    stats_pattern = compile("{value:d} {component} - {desc}\n")

    for line in prog_out.split("\n"):
        res = stats_pattern.search(line + "\n")
        if res is not None:
            yield res
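A hedged sketch of driving the generator with one assumed stats line (the line text is illustrative; real `-stats` output wording may differ):
for res in get_compilestats("86 instcombine - Number of insts combined"):
    print(res['value'], res['component'])  # 86 instcombine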
Example #41
 def y(fmt, s, e, str_equals=False):
     p = parse.compile(fmt)
     r = p.parse(s)
     if r is None:
         self.fail("%r (%r) did not match %r" % (fmt, p._expression, s))
     r = r.fixed[0]
     if str_equals:
         self.assertEqual(str(r), str(e), "%r found %r in %r, not %r" % (fmt, r, s, e))
     else:
         self.assertEqual(r, e, "%r found %r in %r, not %r" % (fmt, r, s, e))
Example #42
def parse_email_set_var(context, address, expression):
    expression = expression.encode('utf-8')
    assert context.persona is not None, u'no persona is setup'
    msgs = context.mail.user_messages(address)
    assert msgs, u'no email received'

    parser = parse.compile(expression)
    res = parser.parse(msgs[-1])

    # Make an implicit assumption that there might be something before/after the expression
    if res is None:
        expression = '{}' + expression + '{}'
        parser = parse.compile(expression)
        res = parser.parse(msgs[-1])

    assert res, u'expression not found'
    assert res.named, u'expression not found'
    for key, val in res.named.items():
        context.persona[key] = val
Example #43
 def y(fmt, s, e, tz=None):
     p = parse.compile(fmt)
     r = p.parse(s)
     if r is None:
         self.fail('%r (%r) did not match %r' % (fmt, p._expression, s))
     r = r.fixed[0]
     self.assertEqual(r, e,
         '%r found %r in %r, not %r' % (fmt, r, s, e))
     if tz is not None:
         self.assertEqual(r.tzinfo, tz,
             '%r found TZ %r in %r, not %r' % (fmt, r.tzinfo, s, e))
Example #44
def _get_param_values(pattern, paths):
    values = []
    wildcard_paths = set()
    parser = parse.compile(pattern)

    for path in paths:
        result = parser.parse(str(path))
        wildcard_paths.add(result.fixed)
        values.append(result.named)

    return values, wildcard_paths
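Purely illustrative usage with a hypothetical pattern and paths (none of these names come from the original project); the anonymous `{}` field ends up in `result.fixed`, which is what `wildcard_paths` collects:
import parse  # assumed to be imported in the original module

values, wildcard_paths = _get_param_values(
    "data/{}/sub-{subject}_run-{run:d}.nii",
    ["data/raw/sub-01_run-1.nii", "data/raw/sub-01_run-2.nii"],
)
print(values)          # [{'subject': '01', 'run': 1}, {'subject': '01', 'run': 2}]
print(wildcard_paths)  # {('raw',)}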
Example #45
File: bot.py Project: dromi/botplug
 def create_parse_dict(self):
     parsedict = {}
     parsedict['hej'] = compile(u"hej")
     parsedict['sig'] = compile(u"sig {}")
     parsedict['kom'] = compile(u"kommandoer")
     parsedict['hjæ'] = compile(u"hjælp {command}")
     parsedict['sov'] = compile(u"gå i seng")
     parsedict['clo'] = compile(u"closing time")
     parsedict['sut'] = compile(u"sut dut")
     return parsedict
Example #46
    def on_message(self, message, nickname, channel, is_query):

        #Checks if message was from a PM or channel
        if is_query:
            reply_to = nickname
        else:
            reply_to = channel
        commands = [
                    (parse.compile(".add {info}"), self.add_info),
                    (parse.compile(".info {name_raw}"), self.get_info),
                    (parse.compile(".delete {name_raw}"), self.delete_info),
                    (parse.compile(".freeze {name_raw}"), self.freeze_info),
                    (parse.compile(".unfreeze {name_raw}"), self.unfreeze_info),
                    (parse.compile(".set {name_raw:S} {info}"), self.set_info), # technically this means extra spaces before the name will cause issues...
                    self.prompt(".help", "Try '.info help'"),
                    self.prompt(".add", "Usage: '.add some info about yourself here'"),
                    self.prompt(".info", "Usage: '.info username'"),
                    self.prompt(".delete", "Mod only usage: '.delete username'"),
                    self.prompt(".freeze", "Mod only usage: '.freeze username'"),
                    self.prompt(".unfreeze", "Mod only usage: '.freeze username'"),
                    self.prompt(".set", "Mod only usage: '.set username then some info about them'")
                   ]
        for parser, func in commands:
            attempt = parser.parse(message.strip())  # strip the message here
            if attempt is not None:
                return func(nickname, channel, reply_to, **attempt.named)
Example #47
def load_data(year=2015, path="data/in/france/"):
    data_dir = os.path.abspath(os.path.join(os.path.join(CUR_DIR, os.pardir), path))
    fname = os.path.join(data_dir, str(year))
    p_round = compile("Round: {round}")
    p_date = compile("Date: {date}")
    p_game = compile(";{home};{h_goal}-{a_goal};{away}")
    data = list()
    row = dict()
    with open(fname) as f:
        for line in f:
            try:
                # Trying to read round number
                result = p_round.parse(line)
                row.update(result.named)
            except:
                pass
            try:
                # Trying to read date
                result = p_date.parse(line)
                row.update(result.named)
            except:
                pass
            try:
                # Trying to read game data
                result = p_game.parse(line)
                row.update(result.named)
                data.append(row)
                row = row.copy()
            except:
                pass
    df = DataFrame(data, columns=["season", "round", "date", "home", "away", "h_goal", "a_goal"])
    df["season"] = year
    df["round"] = pd.to_numeric(df["round"])
    df["date"] = pd.to_datetime(df["date"])
    df["home"] = df["home"].str.strip().astype(str)
    df["away"] = df["away"].str.strip().astype(str)
    df["h_goal"] = pd.to_numeric(df["h_goal"])
    df["a_goal"] = pd.to_numeric(df["a_goal"])
    return df
Example #48
def _get_command_output_lines(cmd, parse_string):
    lines = []

    stdout = Popen(cmd, stdout=PIPE).communicate()[0]
    stream = StringIO.StringIO(stdout)
    parser = compile(parse_string)

    for line in stream:
        res = parser.parse(line)
        if not res:
            continue
        lines.append(res.named)
    return lines
Example #49
def _emails(src_dir, pattern, params):
    wildcard_params = {k: '*' for k in params}
    wildcard_pattern = pattern.format(**wildcard_params)
    parser = parse.compile(pattern)

    for path in Path(src_dir).glob(wildcard_pattern):
        if not path.is_dir():
            str_path = str(path.relative_to(src_dir))
            result = parser.parse(str_path)
            result.named['path'] = str_path
            result.named['full_path'] = str(path.resolve())
            logging.debug('loading email %s', result.named['full_path'])
            yield result
Example #50
        def y(fmt, s, e, tz=None):
            p = parse.compile(fmt)
            r = p.parse(s)
            if r is None:
                self.fail("%r (%r) did not match %r" % (fmt, p._expression, s))
            r = r.fixed[0]
            try:
                self.assertEqual(r, e, "%r found %r in %r, not %r" % (fmt, r, s, e))
            except ValueError:
                self.fail("%r found %r in %r, not %r" % (fmt, r, s, e))

            if tz is not None:
                self.assertEqual(r.tzinfo, tz, "%r found TZ %r in %r, not %r" % (fmt, r.tzinfo, s, e))
Example #51
def sentence_mapping(sentence, threshold=None):
    """
    Maps a sentence and returns the original and the mapped.

    @param sentence: The sentence to map.
    @return: The original sentence and the mapped sentence.
    """

    found = False
    options = []
    original = None
    translation = None

    # first look for general blocks
    for elem in GENERAL:
        if elem[0][:3] == sentence.replace('    ', '')[:3]:
            options.append(elem)
            found = True

    # then look for robotics blocks
    for elem in ROBOTICS:
        if elem[0][:3] == sentence.replace('    ', '').replace('(', '')[:3]:
            options.append(elem)
            found = True

    if found:
        # select the option that better fits
        l = [(m[0], m[1], similar(sentence, m[0])) for m in options]
        original, translation, score = max(l, key=lambda item: item[2])
        if threshold and score < threshold:
            return None, None

        # extract arguments
        p = compile(original)

        args = p.parse(sentence.replace('    ', ''))

        if args:
            args_aux = list(args)

            # look for more blocks
            for idx in range(len(args_aux)):
                new_ori, new_trans = sentence_mapping(args_aux[idx], 0.8)

                if new_trans is not None:
                    args_aux[idx] = args_aux[idx].replace(new_ori, new_trans)

            translation = translation % tuple(args_aux)

    return original, translation
Example #52
def _emails(root_path, pattern, params):
    source_path = os.path.join(root_path, config.paths.source)
    wildcard_params = {k: '*' for k in params}
    wildcard_pattern = pattern.format(**wildcard_params)
    parser = parse.compile(pattern)
    glob_path = Path(source_path).glob(wildcard_pattern)
    for path in sorted(glob_path, key=lambda path: str(path)):
        if not path.is_dir() and _has_correct_ext(path, pattern):
            str_path = str(path.relative_to(source_path))
            result = parser.parse(str_path)
            if result:  # HACK: result can be empty when pattern doesn't contain any placeholder
                result.named['path'] = str(path.resolve())
                if not str_path.endswith(const.GLOBALS_EMAIL_NAME + const.SOURCE_EXTENSION):
                    logger.debug('loading email %s', result.named['path'])
                    yield result
Example #53
 def _build_routes(self):
     routes = sorted(
         (
             (method.upper(), len(route.path.split("/")), route.path, getattr(route, method))
             for route in self.children
             for method in route.methods
             if getattr(route, method)
         ),
         reverse=True,
     )
     return {
         method: sorted(
             ((compile(p), cb) for _, _, p, cb in info), reverse=True, key=lambda x: len(x[0]._format.split("/"))
         )
         for method, info in groupby(routes, itemgetter(0))
     }
Example #54
    def _validate_format(self):
        # interpret escape sequences
        self._args.format = bytes(
            self._args.format, "utf-8"
        ).decode("unicode_escape")

        keys = ['id', 'title', 'link', 'excerpt', 'tags']
        info = dict((key, None) for key in keys)

        try:
            self._args.format.format(**info)
        except KeyError:
            print('Invalid Format Specifier!')
            sys.exit(1)
        else:
            self._format_spec = self._args.format + '\n'
            self._unformat_spec = parse.compile(self._args.format)
Example #55
def _emails(src_dir, pattern, params, exclusive_path=None):
    wildcard_params = {k: '*' for k in params}
    wildcard_pattern = pattern.format(**wildcard_params)
    parser = parse.compile(pattern)

    if exclusive_path:
        glob_path = Path('.').glob(exclusive_path)
    else:
        glob_path = Path(src_dir).glob(wildcard_pattern)

    for path in sorted(glob_path, key=lambda path: str(path)):
        if not path.is_dir() and (not exclusive_path or _has_correct_ext(path, pattern)):
            str_path = str(path.relative_to(src_dir))
            result = parser.parse(str_path)
            result.named['path'] = str_path
            result.named['full_path'] = str(path.resolve())
            logging.debug('loading email %s', result.named['full_path'])
            yield result
Example #56
def get_compilestats(prog_out):
    """ Get the LLVM compilation stats from :prog_out:. """

    class CompileStatsParserError(RuntimeWarning):
        pass

    stats_pattern = parse.compile("{value:d} {component} - {desc}\n")

    for line in prog_out.split("\n"):
        if line:
            try:
                res = stats_pattern.search(line + "\n")
            except ValueError as e:
                warnings.warn(
                    "Triggered a parser exception for: '" + line + "'\n",
                    CompileStatsParserError)
                res = None
            if res is not None:
                yield res
Example #57
File: client.py Project: nsp/pockyt
    def _validate_format(self):
        # interpret escape sequences
        try:
            self._args.format = bytes(
                self._args.format, 'utf-8'
            ).decode('unicode_escape')
        except TypeError:
            self._args.format = self._args.format.decode('unicode_escape')

        info = dict((key, None) for key in API.INFO_KEYS)

        try:
            self._args.format.format(**info)
        except KeyError:
            print('Invalid Format Specifier !')
            sys.exit(1)
        else:
            self._format_spec = self._args.format + '\n'
            self._unformat_spec = parse.compile(self._args.format)
Example #58
    def __init__(self, port=None, baud=38400):
        if port:
            self.port = port
        else:
            self.port = serial.tools.list_ports.comports()[-1][0]
            #self.port = "/dev/ttyUSB0"
        self.ser = serial.Serial(self.port, baud,timeout=30.)  # open serial port
        self.ser.flushInput()
        self.p = compile("G{code} X{x} Y{y}")
        print(self.ser.readline())
        self.busy = False
        #self.homepen()
        self.ser.write(str(150) + 'c')
        self.stop = False
        self.homexy()
        self.ser.flushInput()

        self.w = 1600
        self.h = 1200
Example #59
def get_stored_targets(base_dir, log_filename_format=None):
    """Get parsed stored targets."""
    assert log_filename_format
    if '{job.state}' in log_filename_format:
        log_filename_format = log_filename_format.replace(
            '{job.state}', '{job.state:w}')

    if '{job.number}' in log_filename_format:
        log_filename_format = log_filename_format.replace(
            '{job.number}', '{job.number:f}')

    parser = parse.compile(log_filename_format)

    if base_dir[-1] != '/':
        base_dir = base_dir + '/'

    filenames = get_files(base_dir)

    targets = []
    for filename in filenames:
        parsed_filename = parser.parse(filename)
        if not parsed_filename:
            __logs__.warning('Unexpected filename {0}'.format(filename))
            continue
        target = Target()
        try:
            target.slug = parsed_filename['job.repository.slug']
        except KeyError:
            pass
        try:
            target.number = str(parsed_filename['job.number'])
        except KeyError:
            pass
        try:
            target.state = parsed_filename['job.state']
        except KeyError:
            pass

        targets.append(target)

    return targets
Example #60
def _emails(src_dir, pattern, params, exclusive_path=None, include_global=False):
    wildcard_params = {k: '*' for k in params}
    wildcard_pattern = pattern.format(**wildcard_params)
    parser = parse.compile(pattern)

    if exclusive_path:
        glob_path = Path('.').glob(exclusive_path)
    else:
        glob_path = Path(src_dir).glob(wildcard_pattern)

    global_email_pattern = re.compile(r'/%s\.xml$' % GLOBAL_PLACEHOLDERS_EMAIL_NAME)
    for path in sorted(glob_path, key=lambda path: str(path)):
        if not path.is_dir() and (not exclusive_path or _has_correct_ext(path, pattern)):
            str_path = str(path.relative_to(src_dir))
            result = parser.parse(str_path)
            if result:  # HACK: result can be empty when the pattern doesn't contain any placeholder
                result.named['path'] = str_path
                result.named['full_path'] = str(path.resolve())
                if not re.findall(global_email_pattern, str_path) or include_global:
                    logging.debug('loading email %s', result.named['full_path'])
                    yield result