Example #1
def test2():
    path = './data/list.txt'
    manifest_dic = fetcher.fetch_manifest_from_file(path)
    for manifest_url in manifest_dic.keys():
        SSM = SmoothStreamingMedia()
        parser.parse(manifest_dic[manifest_url], manifest_url, SSM)
        fetcher.fetch_fragment(SSM)
Example #2
def datestring_to_datetime(inp):
    """
    Convert a date/time string to a proper start (and optionally end) datetime
    """
    if type(inp) in [str, unicode]:
        string = inp.strip()
        format = '{day:d}.{month:d}.{year:d} {hour:d}:{minute:d}-{hourend:d}:{minuteend:d}'
        p = parse.parse(format, string)
        if p is not None:
            out = datetime.datetime(p['year'], p['month'], p['day'], p['hour'],
                                    p['minute'])
            #sys.stdout.write("\nA p: %s, datetime: %s\n" % (p, out.isoformat()))
            return out
        else:
            format = '{day:d}.{month:d}.{year:d} {hour:d}:{minute:d}'
            p = parse.parse(format, string)
            if p is not None:
                out = datetime.datetime(p['year'], p['month'], p['day'],
                                        p['hour'], p['minute'])
                #sys.stdout.write("\nB p: %s, datetime: %s\n" % (p, out.isoformat()))
                return out
            else:
                format = '{day:d}.{month:d}.{year:d}'
                p = parse.parse(format, string)
                if p is not None:
                    out = datetime.datetime(p['year'], p['month'], p['day'], 0,
                                            0)
                    #sys.stdout.write("\nB p: %s, datetime: %s\n" % (p, out.isoformat()))
                    return out
                else:
                    return None
    return inp
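A minimal, self-contained sketch of the same fall-through idea; the loop and the `named.get` defaults are additions for illustration, not part of the example above:

import datetime
import parse

def datestring_to_datetime_sketch(text):
    # Try the most specific format first, then fall back to coarser ones.
    formats = [
        '{day:d}.{month:d}.{year:d} {hour:d}:{minute:d}-{hourend:d}:{minuteend:d}',
        '{day:d}.{month:d}.{year:d} {hour:d}:{minute:d}',
        '{day:d}.{month:d}.{year:d}',
    ]
    for fmt in formats:
        p = parse.parse(fmt, text.strip())
        if p is not None:
            return datetime.datetime(p['year'], p['month'], p['day'],
                                     p.named.get('hour', 0), p.named.get('minute', 0))
    return None

# datestring_to_datetime_sketch('24.12.2021 18:00-20:00') -> datetime.datetime(2021, 12, 24, 18, 0)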
Example #3
    def __init__(self,item,parent):
        self.item = item
        self.parent = parent
        self.x = float(item.attrib['x'])
        self.y = float(item.attrib['y'])
        self.rot = 0.0
        if 'rot' in item.attrib.keys():
            rot = item.attrib['rot']
            print rot
            rotParse = parse.parse('R{:d}',rot)
            if rotParse is None:
                rotParse = parse.parse('MR{:d}',rot)
            self.rot = float(rotParse[0])*(math.pi/180)

        rot = -self.parent.rot
        rot2 = -self.rot - self.parent.rot
        self.underMouse = False

        (self.x,self.y) = rotate(self.x,self.y,rot)

        self.dx = float(item.attrib['dx'])
        self.dy = float(item.attrib['dy'])
        (self.dx,self.dy) = rotate(self.dx,self.dy,rot2)
        (self.x,self.y) = (self.x-self.dx/2,self.y-self.dy/2)

        self.name = item.attrib['name']
Example #4
def main():
    # parse command
    command_log = 'CIRCexplorer parameters: ' + ' '.join(sys.argv)
    if len(sys.argv) == 1:
        sys.exit(help_doc)
    elif sys.argv[1] == '--version' or sys.argv[1] == '-v':
        sys.exit(__version__)
    elif sys.argv[1] == 'align':
        import align
        align.align(docopt(align.__doc__, version=__version__),
                    command=command_log, name='align')
    elif sys.argv[1] == 'parse':
        import parse
        parse.parse(docopt(parse.__doc__, version=__version__),
                    command=command_log, name='parse')
    elif sys.argv[1] == 'annotate':
        import annotate
        annotate.annotate(docopt(annotate.__doc__, version=__version__),
                          command=command_log, name='annotate')
    elif sys.argv[1] == 'assemble':
        import assemble
        assemble.assemble(docopt(assemble.__doc__, version=__version__),
                          command=command_log, name='assemble')
    elif sys.argv[1] == 'denovo':
        import denovo
        denovo.denovo(docopt(denovo.__doc__, version=__version__),
                      command=command_log, name='denovo')
    else:
        sys.exit(help_doc)
Example #5
def parse_sig_command(msg, query):
    reply = ""

    if str(parse("sig til {unick} at {arg}", query)) != "None":
        p = parse("sig til {unick} at {arg}", query)
        reply = p['unick'] + ": " + p['arg']

    else:
        reply = "Brug: sig til <person> at han/hun <besked>"

    replace_dict = (
        ('jeg', msg['mucnick']),
        ('du', 'jeg'),
        ('han', 'du'),
        ('hun', 'du'),
        ('sig', 'dig'),
        ('min', msg['mucnick']),
        ('din', 'min'),
        ('sin', 'din'),
        ('dit', 'mit'),
        ('dine', 'mine'),
        ('sine', 'dine'),
    )

    for old, new in replace_dict:
        reply = reply.replace(old, new)

    return reply
Example #6
def test_parse():
    """Assert the correct return values when parse is called to parse,
    group and return the desired information from a string.
    """
    regex = "(?P<name>[a-zA-Z]+ ?[a-zA-Z]*)"
    assert parse.parse(regex, 'name', '07734 Hyewon Namkung') == 'Hyewon Namkung'
    assert not parse.parse(regex, 'name', '123450987*&!@#$')
Example #7
def parse_install_output(output):
    """Parse output from pip download to get name and file mappings
    for all dependencies and their sub dependencies.

    This is required for proper file hashing with --require-hashes.
    """
    output_sections = output.split('Collecting ')
    names = []

    for section in output_sections:
        lines = section.split('\n')

        # Strip dependency parens from name line. e.g. package (from other_package)
        name = lines[0].split('(')[0]
        # Strip version specification. e.g. package; python-version=2.6
        name = name.split(';')[0]

        for line in lines:
            r = parse.parse('Saved {file}', line.strip())
            if r is None:
                r = parse.parse('Using cached {file}', line.strip())
            if r is None:
                continue

            fname = r['file'].split(os.sep)[-1]
            # Unencode percent-encoded values like ``!`` in version number.
            fname = requests.compat.unquote(fname)

            names.append((fname, name.strip()))
            break

    return names
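For reference, what the two patterns above extract from sample pip output lines (the lines themselves are invented, not captured from pip):

import parse

line1 = 'Saved ./wheels/requests-2.19.1-py2.py3-none-any.whl'   # hypothetical line
line2 = 'Using cached six-1.11.0-py2.py3-none-any.whl'          # hypothetical line

print(parse.parse('Saved {file}', line1)['file'])         # ./wheels/requests-2.19.1-py2.py3-none-any.whl
print(parse.parse('Using cached {file}', line2)['file'])  # six-1.11.0-py2.py3-none-any.whl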
Example #8
    def test_biconditional(self):
        statement = parse("A<->B")
        self.assertIsNotNone(statement)
        self.assertEqual(statement.type, "<->")
        self.assertEqual(statement.value1().type, "lit")
        self.assertEqual(statement.value2().type, "lit")
        self.assertEqual(statement.__str__(), "(A<->B)")

        statement = parse("A<->(B<->C)")
        self.assertIsNotNone(statement)
        self.assertEqual(statement.type, "<->")
        self.assertEqual(statement.value1().type, "lit")
        self.assertEqual(statement.value2().type, "<->")
        self.assertEqual(statement.__str__(), "(A<->(B<->C))")

        statement = parse("(A<->B)<->C")
        self.assertIsNotNone(statement)
        self.assertEqual(statement.type, "<->")
        self.assertEqual(statement.value1().type, "<->")
        self.assertEqual(statement.value2().type, "lit")
        self.assertEqual(statement.__str__(), "((A<->B)<->C)")

        statement = parse("(A<->B)<->(C<->D)")
        self.assertIsNotNone(statement)
        self.assertEqual(statement.type, "<->")
        self.assertEqual(statement.value1().type, "<->")
        self.assertEqual(statement.value2().type, "<->")
        self.assertEqual(statement.__str__(), "((A<->B)<->(C<->D))")

        statement = parse("A<->(B<->(C<->D))")
        self.assertIsNotNone(statement)
        self.assertEqual(statement.__str__(), "(A<->(B<->(C<->D)))")
Example #9
def parse_for_req_prov_params(script_fpath):
    """
    """
    with open(script_fpath, "r") as f:
        all_lines = f.readlines()
    fnames_req_prov_dict = {}
    all_required_params = []
    all_provided_params = []
    for i in range(len(all_lines) - 1):
        if "@myFeature" in all_lines[i] and "def " in all_lines[i + 1]:
            reqs_provs_1 = parse(
                "@myFeature(requires={requires}, provides={provides})",
                all_lines[i].strip())
            func_name = parse(
                "def {funcname}({args}):", all_lines[i + 1].strip())
            fnames_req_prov_dict[func_name.named['funcname']] = {
                "requires": eval(reqs_provs_1.named["requires"]),
                "provides": eval(reqs_provs_1.named["provides"])}
            all_required_params = list(set(
                all_required_params +
                list(set(eval(reqs_provs_1.named["requires"])))))
            all_provided_params = list(set(
                all_provided_params +
                list(set(eval(reqs_provs_1.named["provides"])))))
    return (fnames_req_prov_dict, all_required_params, all_provided_params)
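A quick illustration of what the decorator pattern above captures; the decorator line here is made up, and the captured fields are still strings until eval() is applied:

from parse import parse

line = "@myFeature(requires=['t', 'm'], provides=['amplitude'])"   # invented example line
r = parse("@myFeature(requires={requires}, provides={provides})", line)
print(r.named['requires'])   # "['t', 'm']"   (a string; eval() turns it into a list)
print(r.named['provides'])   # "['amplitude']"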
Example #10
def parse_r_type(opcode, line):
    if opcode in ['add', 'sub', 'mul', 'and', 'or', 'xor', 'slt', 'seq']:
        return parse("{} ${}, ${}, ${}", line)
    if opcode in ['sll', 'srl', 'sra']:
        return parse("{} ${}, ${}, {}", line)
    if opcode == 'mv':
        return parse("{} ${}, ${}", line)
Example #11
    def __init__(self,element,pcb):
        self.element = element
        self.underMouse = False
        self.pcb = pcb

        self.libraryName = element.attrib['library']
        self.partName = element.attrib['name']
        self.packageName = element.attrib['package']
        print element,element.tag,element.attrib
        self.x = float(element.attrib['x'])
        self.y = float(element.attrib['y'])
        self.rot = 0
        self.pads = []
        self.smds = []

        if 'rot' in element.attrib:
            rot = element.attrib['rot']
            print rot
            rotParse = parse.parse('R{:d}',rot)
            if rotParse is None:
                rotParse = parse.parse('MR{:d}',rot)
            self.rot = float(rotParse[0])*(math.pi/180)
            print self.rot

        self.drawingElements = []

        self.findFromLibrary()
        self.loadFromLibrary()
        self.loadPads()
        self.loadSMDs()
Example #12
File: dice.py Project: cxbb/RJJBot
 def process_message(self, m):
   text = m.get('text')
   if text is None:
     return None
   sender = m['from']['first_name']
   text = re.sub(' +', ' ', text).strip() # strip multiple spaces
   ###### two arguments ######
   try:
     res = parse('/roll {:d} {:d}', text)
   except:
     res = None
   if res:
     faces = res.fixed[0]
     times = res.fixed[1]
     if faces >= 1 and times >= 1:
       out = [ random.randint(1, faces) for i in xrange(0, times)]
       return '%s rolled %s. The sum is %s.' % (sender, out, sum(out))
     else:
       return 'Positive numbers please.'
   ###### one argument ######
   try:
     res = parse('/roll {:d}', text)
   except:
     res = None
   if res:
     faces = res.fixed[0]
     if faces >= 1:
       return '%s rolled %s.' % (sender, random.randint(1, faces))
     else:
       return 'A positive number please.'
   ###### zero argument ######
   if text == '/roll':
     faces = 6
     return '%s rolled %s.' % (sender, random.randint(1, faces))
   return None
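A short sketch of how the two /roll patterns above behave on invented inputs; note that parse() simply returns None on a mismatch rather than raising:

from parse import parse

print(parse('/roll {:d} {:d}', '/roll 6 3').fixed)   # (6, 3)
print(parse('/roll {:d}', '/roll 20').fixed)         # (20,)
print(parse('/roll {:d}', '/roll twenty'))           # None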
Example #13
    def test_implication(self):
        statement = parse("A->B")
        self.assertIsNotNone(statement)
        self.assertEqual(statement.type, "->")
        self.assertEqual(statement.value1().type, "lit")
        self.assertEqual(statement.value2().type, "lit")
        self.assertEqual(statement.__str__(), "(A->B)")

        statement = parse("A->(B->C)")
        self.assertIsNotNone(statement)
        self.assertEqual(statement.type, "->")
        self.assertEqual(statement.value1().type, "lit")
        self.assertEqual(statement.value2().type, "->")
        self.assertEqual(statement.__str__(), "(A->(B->C))")

        statement = parse("(A->B)->C")
        self.assertIsNotNone(statement)
        self.assertEqual(statement.type, "->")
        self.assertEqual(statement.value1().type, "->")
        self.assertEqual(statement.value2().type, "lit")
        self.assertEqual(statement.__str__(), "((A->B)->C)")

        statement = parse("(A->B)->(C->D)")
        self.assertIsNotNone(statement)
        self.assertEqual(statement.type, "->")
        self.assertEqual(statement.value1().type, "->")
        self.assertEqual(statement.value2().type, "->")
        self.assertEqual(statement.__str__(), "((A->B)->(C->D))")

        statement = parse("A->(B->(C->D))")
        self.assertIsNotNone(statement)
        self.assertEqual(statement.__str__(), "(A->(B->(C->D)))")
Example #14
def list_features_provided(script_fpath):
    """Parses script and returns a list of all features it provides.

    Parses decorator expression in custom feature definitions script,
    returning a list of all feature names generated by the various
    definitions in that script.

    Parameters
    ----------
    script_fpath : str
        Path to custom features definition script.

    Returns
    -------
    list of str
        List of feature names that the script will generate.

    """
    with open(script_fpath, "r") as f:
        all_lines = f.readlines()
    fnames_req_prov_dict = {}
    all_required_params = []
    all_provided_params = []
    for i in range(len(all_lines) - 1):
        if "@myFeature" in all_lines[i] and "def " in all_lines[i + 1]:
            reqs_provs_1 = parse("@myFeature(requires={requires}, provides={provides})", all_lines[i].strip())
            func_name = parse("def {funcname}({args}):", all_lines[i + 1].strip())
            fnames_req_prov_dict[func_name.named["funcname"]] = {
                "requires": eval(reqs_provs_1.named["requires"]),
                "provides": eval(reqs_provs_1.named["provides"]),
            }
            all_required_params = list(set(all_required_params + list(set(eval(reqs_provs_1.named["requires"])))))
            all_provided_params = list(set(all_provided_params + list(set(eval(reqs_provs_1.named["provides"])))))
    return all_provided_params
Example #15
def datestring_to_datetime(inp):
  """
  Convert a date/time string to a proper start (and optionally end) datetime
  """
  berlin = timezone('Europe/Berlin')
  if type(inp) in [str, unicode]:
    string = inp.strip()
    format = '{day:d}.{month:d}.{year:d} {hour:d}:{minute:d}-{hourend:d}:{minuteend:d}'
    p = parse.parse(format, string)
    if p is not None:
      out = datetime.datetime(p['year'], p['month'], p['day'], p['hour'], p['minute'], tzinfo=berlin)
      return out
    else:
      format = '{day:d}.{month:d}.{year:d} {hour:d}:{minute:d}'
      p = parse.parse(format, string)
      if p is not None:
        out = datetime.datetime(p['year'], p['month'], p['day'], p['hour'], p['minute'], tzinfo=berlin)
        return out
      else:
        format = '{day:d}.{month:d}.{year:d}'
        p = parse.parse(format, string)
        if p is not None:
          out = datetime.datetime(p['year'], p['month'], p['day'], 0, 0, tzinfo=berlin)
          return out
        else:
          return None
  return inp
Example #16
    def test_width_str(self):
        res = parse.parse('{:.2}{:.2}', 'look')
        self.assertEqual(res.fixed, ('lo', 'ok'))
        res = parse.parse('{:2}{:2}', 'look')
        self.assertEqual(res.fixed, ('lo', 'ok'))
        res = parse.parse('{:4}{}', 'look at that')
        self.assertEqual(res.fixed, ('look', ' at that'))
Example #17
    def test_width_empty_input(self):
        res = parse.parse('{:.2}', '')
        self.assertIsNone(res)
        res = parse.parse('{:2}', 'l')
        self.assertIsNone(res)
        res = parse.parse('{:2d}', '')
        self.assertIsNone(res)
Example #18
def parse_constraint(constraint):
    """
    Parse a version constraint string into an info dictionary of the form
    {'comparator': comparator, 'tag': tag, 'version': version, 'postfix': postfix}

    Args:
        constraint(string): The string representing the constraint

    Returns:
        dictionary: The information dictionary
    """
    match = comparator_re.search(constraint)
    comparator = match.group(1)
    tag = match.group(2)

    version = None
    postfix = None
    parsed_results = parse('v{version}-{postfix}', tag)
    if parsed_results:
        version = parsed_results["version"]
        postfix = parsed_results["postfix"]
    else:
        parsed_results = parse('v{version}', tag)
        if parsed_results:
            version = parsed_results["version"]
            postfix = None

    return {'comparator': comparator,
            'tag': tag,
            'version': version,
            'postfix': postfix}
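As an illustration, parsing an invented tag with the two patterns used above:

from parse import parse

result = parse('v{version}-{postfix}', 'v1.2.0-rc1')   # hypothetical tag
print(result['version'], result['postfix'])            # 1.2.0 rc1
print(parse('v{version}', 'v2.0'))                     # <Result () {'version': '2.0'}>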
Example #19
def process(req, rep):
    (handle, tmp_in)  = tempfile.mkstemp()
    (handle, tmp_out) = tempfile.mkstemp()

    # Update carry-over parameters
    rep.sequence = req.sequence
    rep.recipient = req.recipient
    rep.identifier = req.identifier

    #  Write query sequence to file in FASTA format
    with open(tmp_in, 'w') as file:
        file.write('> x\n')
        file.write('%s\n' % req.sequence)

    params = { 'exe' : FLAGS.exe, 'db' : FLAGS.db, 'fasta' : tmp_in, 'out' : tmp_out }
    subprocess.check_call(hmmer_cmd.safe_substitute(params).split())

    parse(tmp_out, rep)
    fix_alignments(rep.alignments)

    try:
        os.remove(tmp_in)
        os.remove(tmp_out)
    except:
        sys.stderr.write('Failed to remove one or more temporary files: %s, %s' % (tmp_in, tmp_out))
Example #20
    def test_conjunction(self):
        statement = parse("A^B")
        self.assertIsNotNone(statement)
        self.assertEqual(statement.type, "^")
        self.assertEqual(statement.value1().type, "lit")
        self.assertEqual(statement.value2().type, "lit")
        self.assertEqual(statement.__str__(), "(A^B)")

        statement = parse("A^(B^C)")
        self.assertIsNotNone(statement)
        self.assertEqual(statement.type, "^")
        self.assertEqual(statement.value1().type, "lit")
        self.assertEqual(statement.value2().type, "^")
        self.assertEqual(statement.__str__(), "(A^(B^C))")

        statement = parse("(A^B)^C")
        self.assertIsNotNone(statement)
        self.assertEqual(statement.type, "^")
        self.assertEqual(statement.value1().type, "^")
        self.assertEqual(statement.value2().type, "lit")
        self.assertEqual(statement.__str__(), "((A^B)^C)")

        statement = parse("(A^B)^(C^D)")
        self.assertIsNotNone(statement)
        self.assertEqual(statement.type, "^")
        self.assertEqual(statement.value1().type, "^")
        self.assertEqual(statement.value2().type, "^")
        self.assertEqual(statement.__str__(), "((A^B)^(C^D))")

        statement = parse("A^(B^(C^D))")
        self.assertIsNotNone(statement)
        self.assertEqual(statement.__str__(), "(A^(B^(C^D)))")
Example #21
def learn(*args):
  obs_ = args[0]
  gamma = 1
  if (len(args) > 1):
    gamma = args[1]
  if (len(args) > 2):
    R = args[2]
    parsed = par.parse(obs_,R)
  else:
    parsed = par.parse(obs_)
  #parsed = [stateMap,actionMap,observations]

  stateMap = parsed[0]
  actMap = parsed[1]
  obs = parsed[2]
  model = mod.model(len(stateMap),len(actMap),obs)

  P = model[0]
  R = model[1]
  policy = sol.policy(P,gamma,R)

  # map integer states and actions back to their original labels
  strat = {}
  for i in range(0,len(policy)):
    strat[stateMap[i]] = actMap[policy[i]]

  #return strategy
  return strat
Example #22
    def test_disjunction(self):
        statement = parse("AvB")
        self.assertIsNotNone(statement)
        self.assertEqual(statement.type, "v")
        self.assertEqual(statement.value1().type, "lit")
        self.assertEqual(statement.value2().type, "lit")
        self.assertEqual(statement.__str__(), "(AvB)")

        statement = parse("Av(BvC)")
        self.assertIsNotNone(statement)
        self.assertEqual(statement.type, "v")
        self.assertEqual(statement.value1().type, "lit")
        self.assertEqual(statement.value2().type, "v")
        self.assertEqual(statement.__str__(), "(Av(BvC))")

        statement = parse("(AvB)vC")
        self.assertIsNotNone(statement)
        self.assertEqual(statement.type, "v")
        self.assertEqual(statement.value1().type, "v")
        self.assertEqual(statement.value2().type, "lit")
        self.assertEqual(statement.__str__(), "((AvB)vC)")

        statement = parse("(AvB)v(CvD)")
        self.assertIsNotNone(statement)
        self.assertEqual(statement.type, "v")
        self.assertEqual(statement.value1().type, "v")
        self.assertEqual(statement.value2().type, "v")
        self.assertEqual(statement.__str__(), "((AvB)v(CvD))")

        statement = parse("Av(Bv(CvD))")
        self.assertIsNotNone(statement)
        self.assertEqual(statement.__str__(), "(Av(Bv(CvD)))")
Example #23
def parse_i_type(opcode, line):
    if opcode in ['sw', 'lw', 'thread_finished']:
        return [opcode]
    if opcode in ['ldc', 'ldi']:
        return parse("{} ${}, {}", line)
    if opcode == 'addi':
        return parse("{} ${}, ${}, {}", line)
Example #24
def create_swig_xml(interface_filename, xml_filename):
    cmd = "swig -xml -xmllite -o %s %s" % (xml_filename, interface_filename)
    print(cmd)
    result = subprocess.call(cmd, shell=True)
    if result:
        sys.exit(result)

    usage = "usage: %prog [options] <header.h>"
    op = optparse.OptionParser(usage=usage)
    op.add_option("-o", "--output")
    op.add_option("-i", "--interface-file")
    op.add_option("-x", "--xml-file")
    op.add_option("-I", dest="includes", action="append", default=list())
    op.add_option("-D", dest="defines", action="append", default=list())
    (options, args) = op.parse_args(sys.argv[1:])

    if len(args) < 1:
        print("No input file given", file=sys.stderr)
        sys.exit(1)

    header_filename = args[0]
    module_name, _ = os.path.splitext(os.path.basename(header_filename))
    if options.interface_file is None:
        options.interface_file = module_name + ".i"
    if options.xml_file is None:
        options.xml_file = module_name + ".xml"
    if options.output is None:
        options.output = module_name + ".ffi"

    create_swig_interface(header_filename, options.interface_file, includes=options.includes, defines=options.defines)
    create_swig_xml(options.interface_file, options.xml_file)
    parse.parse(options.xml_file, options.output)
Example #25
    def test_dotted_type_conversion_pull_8(self):
        # test pull request 8 which fixes type conversion related to dotted
        # names being applied correctly
        r = parse.parse("{a.b:d}", "1")
        self.assertEqual(r["a.b"], 1)
        r = parse.parse("{a_b:w} {a.b:d}", "1 2")
        self.assertEqual(r["a_b"], "1")
        self.assertEqual(r["a.b"], 2)
Example #26
    def test_no_match(self):
        vals = []
        vals.append(parse("a"))
        vals.append(parse("AB"))
        vals.append(parse("A B"))

        for val in vals:
            self.assertIsNone(val, msg="Invalid pattern Parsed!")
Example #27
    def test_reduce_literal(self):
        stmt = parse("A")
        red = parse("A")
        self.assertTrue(stmt.reduce(red))

        stmt = parse("A")
        red = parse("~A")
        self.assertFalse(stmt.reduce(red))
Example #28
def enterREPL():
    while(True):
        inp = raw_input(str(utils.lines) + "> ").strip()

        if(inp == "quit"):
            return

        parse.parse(inp)
Example #29
    def test_named_aligned_typed(self):
        # pull named, typed values out of a string
        r = parse.parse("hello {number:<d} {things}", "hello 12      people")
        self.assertEqual(r.named, dict(number=12, things="people"))
        r = parse.parse("hello {number:>d} {things}", "hello      12 people")
        self.assertEqual(r.named, dict(number=12, things="people"))
        r = parse.parse("hello {number:^d} {things}", "hello      12      people")
        self.assertEqual(r.named, dict(number=12, things="people"))
Example #30
    def test_dotted_type_conversion_pull_8(self):
        # test pull request 8 which fixes type conversion related to dotted
        # names being applied correctly
        r = parse.parse('{a.b:d}', '1')
        self.assertEqual(r['a.b'], 1)
        r = parse.parse('{a_b:w} {a.b:d}', '1 2')
        self.assertEqual(r['a_b'], '1')
        self.assertEqual(r['a.b'], 2)
Example #31
    def identify(self, filepath, url=None):
        """
        Identifies a file of the layer

        :param filepath: filepath from AMQP
        :param url: fully qualified URL of file

        :returns: `list` of file properties
        """

        super().identify(filepath, url)

        self.model = 'cansips'

        LOGGER.debug('Loading model information from store')
        self.file_dict = json.loads(self.store.get_key(self.model))

        filename_pattern = self.file_dict[self.model]['filename_pattern']

        tmp = parse(filename_pattern, os.path.basename(filepath))

        file_pattern_info = {
            'resolution':
            tmp.named['resolution'],
            'wx_variable':
            '{}_{}_{}'.format(tmp.named['wx_variable'], tmp.named['pressure'],
                              tmp.named['pres_value']),
            'year_':
            tmp.named['YYYY'],
            'month_':
            tmp.named['MM']
        }

        LOGGER.debug('Defining the different file properties')
        self.wx_variable = file_pattern_info['wx_variable']

        if self.wx_variable not in self.file_dict[self.model]['variable']:
            msg = 'Variable "{}" not in ' \
                  'configuration file'.format(self.wx_variable)
            LOGGER.warning(msg)
            return False

        weather_var = self.file_dict[self.model]['variable'][self.wx_variable]
        self.geomet_layers = weather_var['geomet_layers']

        date_format = '%Y%m'
        self.date_ = '{}{}'.format(file_pattern_info['year_'],
                                   file_pattern_info['month_'])
        reference_datetime = datetime.strptime(self.date_, date_format)
        self.date_ = reference_datetime
        self.model_run = '{}Z'.format(reference_datetime.strftime('%H'))

        for band in self.file_dict[self.model]['bands']:

            dict_bands = self.file_dict[self.model]['bands']

            fhi = dict_bands[band]['forecast_interval']
            fhi = re.sub('[^0-9]', '', fhi)

            forecast_hour_datetime = reference_datetime + \
                relativedelta(months=int(fhi))

            elevation = weather_var['elevation']
            member = dict_bands[band]['member']

            mem_str = str(member).zfill(2)

            for layer, layer_config in self.geomet_layers.items():

                layer_name = layer.format(mem_str)
                str_mr = re.sub('[^0-9]', '',
                                reference_datetime.strftime(DATE_FORMAT))
                str_fh = re.sub('[^0-9]', '',
                                forecast_hour_datetime.strftime(DATE_FORMAT))
                identifier = '{}-{}-{}'.format(layer_name, str_mr, str_fh)
                vrt = 'vrt://{}?bands={}'.format(self.filepath, band)

                begin, end, interval = layer_config['forecast_hours'].split(
                    '/')
                interval = int(re.sub("[^0-9]", "", interval))

                feature_dict = {
                    'layer_name':
                    layer_name,
                    'filepath':
                    vrt,
                    'identifier':
                    identifier,
                    'reference_datetime':
                    reference_datetime.strftime(DATE_FORMAT),
                    'forecast_hour_datetime':
                    forecast_hour_datetime.strftime(DATE_FORMAT),  # noqa
                    'member':
                    member,
                    'model':
                    self.model,
                    'elevation':
                    elevation,
                    'expected_count':
                    None,
                    'forecast_hours': {
                        'begin': begin,
                        'end': end,
                        'interval':
                        layer_config['forecast_hours'].split('/')[2]
                    },
                    'static_model_run': {
                        'begin': layer_config['begin']
                    },
                    'layer_config':
                    layer_config,
                    'register_status':
                    True
                }

                self.items.append(feature_dict)

        return True
Example #32
    def parse(self):

        # Read sheet file
        sheet = open(self.sheet_loc, 'r')
        read_lines = sheet.readlines()
        sheet.close()

        # Find sheet START and END
        try:
            start_index = read_lines.index('START\n')
        except:
            raise SheetParserError(
                'No START token.',
                'Sheet should be placed between START and END')
        else:
            read_lines = read_lines[start_index + 1:]

        try:
            end_index = read_lines.index('END\n')
        except:
            raise SheetParserError(
                'No END token.',
                'Sheet should be placed between START and END')
        else:
            read_lines = read_lines[:end_index]

        # Check bpm
        bpm_parse = parse('bpm: {:d}', read_lines[0])
        if bpm_parse is None:
            raise SheetParserError(
                'No bpm.', 'bpm should be specified after START. ex) bpm: 120')
        bpm = bpm_parse[0]
        if bpm > 180:
            raise SheetParserError('Too big bpm',
                                   'bpm should be less than or equal to 180.')
        if bpm < 40:
            raise SheetParserError('Too small bpm',
                                   'bpm should be greater than or equal to 40.')

        # Set freq
        self.freq = bpm / 60.0

        # Check bar length
        bar_parse = parse('bar: {:d}/{:d}', read_lines[1])
        if bar_parse is None:
            raise SheetParserError(
                'No bar.', 'bar should be specified after bpm. ex) bar: 4/4')
        # Calculate bar length
        base_beat = bar_parse[1]
        num_of_beat = bar_parse[0]
        self.length_of_bar = self.beat_checker(base_beat, bpm, 0) * num_of_beat

        # Exclude bpm and bar
        read_lines = read_lines[2:]

        # Find lines START and END
        lines = []  # Found lines
        line = []  # List to store notes
        line_started = False
        for i, token in enumerate(read_lines):
            # If comment, pass
            if token.startswith('#'):
                continue
            if (not line_started) and (token == 'LINESTART\n'):
                line_started = True
                line = []
            elif line_started and token == 'LINEEND\n':
                if not len(line) > 0:
                    raise SheetParserError(
                        'Line {}'.format(i),
                        'Line should consist of at least one note.')
                else:
                    line_started = False
                    lines.append(line)
            elif token == 'BARSTART\n' or token == 'BAREND\n':
                line.append(token)
            else:
                p = parse('({}, {:d})', token)
                if p is None:
                    raise SheetParserError('Line {} : {}'.format(i, token),
                                           'Note format error.')
                line.append(token)
        if line_started:
            raise SheetParserError('Line error',
                                   'Line not finished after LINESTART')
        if not len(lines) > 0:
            raise SheetParserError('Line error',
                                   'At least one line should exist.')

        # Convert lines
        converted_lines = []
        for i, line in enumerate(lines):
            converted_lines.append(self.parse_line(line, i + 1, bpm))

        # Return
        return Sheet(bpm, converted_lines)
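For reference, how the bpm, bar, and note patterns above behave on invented, already-stripped tokens:

from parse import parse

print(parse('bpm: {:d}', 'bpm: 120')[0])           # 120
print(parse('bar: {:d}/{:d}', 'bar: 4/4').fixed)   # (4, 4)
print(parse('({}, {:d})', '(C4, 4)').fixed)        # ('C4', 4)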
Example #33
    def find_latest_remote(self):
        """
        Used to update the published dict

        CommandLine:
            python -m ibeis.algo.verif.vsone find_latest_remote

        Example:
            >>> # DISABLE_DOCTEST
            >>> from ibeis.algo.verif.vsone import *  # NOQA
            >>> self = Deployer()
            >>> task_clf_names = self.find_latest_remote()
        """
        base_url = 'https://{remote}/public/models/pairclf'.format(
            **self.publish_info)
        import requests
        import bs4
        resp = requests.get(base_url)
        soup = bs4.BeautifulSoup(resp.text, 'html.parser')
        table = soup.findAll('table')[0]

        def parse_bs_table(table):
            n_columns = 0
            n_rows = 0
            column_names = []
            # Find number of rows and columns
            # we also find the column titles if we can
            for row in table.find_all('tr'):
                td_tags = row.find_all('td')
                if len(td_tags) > 0:
                    n_rows += 1
                    if n_columns == 0:
                        n_columns = len(td_tags)
                # Handle column names if we find them
                th_tags = row.find_all('th')
                if len(th_tags) > 0 and len(column_names) == 0:
                    for th in th_tags:
                        column_names.append(th.get_text())

            # Safeguard on Column Titles
            if len(column_names) > 0 and len(column_names) != n_columns:
                raise Exception(
                    "Column titles do not match the number of columns")
            columns = column_names if len(column_names) > 0 else range(
                0, n_columns)
            import pandas as pd
            df = pd.DataFrame(columns=columns, index=list(range(0, n_rows)))
            row_marker = 0
            for row in table.find_all('tr'):
                column_marker = 0
                columns = row.find_all('td')
                for column in columns:
                    df.iat[row_marker,
                           column_marker] = column.get_text().strip()
                    column_marker += 1
                if len(columns) > 0:
                    row_marker += 1
            return df

        df = parse_bs_table(table)
        # Find all available models
        df = df[df['Name'].map(lambda x: x.endswith('.cPkl'))]
        # df = df[df['Last modified'].map(len) > 0]

        fname_fmt = self.fname_fmtstr + '.cPkl'
        task_clf_candidates = ut.ddict(list)
        import parse
        for idx, row in df.iterrows():
            fname = basename(row['Name'])
            result = parse.parse(fname_fmt, fname)
            if result:
                task_key = result.named['task_key']
                species = result.named['species']
                task_clf_candidates[(species, task_key)].append(idx)

        task_clf_fnames = ut.ddict(dict)
        for key, idxs in task_clf_candidates.items():
            species, task_key = key
            # Find the classifier most recently created
            max_idx = ut.argmax(df.loc[idxs]['Last modified'].tolist())
            fname = df.loc[idxs[max_idx]]['Name']
            task_clf_fnames[species][task_key] = fname

        print('published = ' + ut.repr2(task_clf_fnames, nl=2))
        return task_clf_fnames
Example #34
def run_RGBDFusion(output_root, root_path, cfg):

    o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Error)

    def FusionFromRGBD(frame_paths, intrinsics, cfg):
        '''
        frame_paths: [(color_path, depth_path, cam_path)]
        '''
        read = o3d.io.read_image
        volume = o3d.integration.ScalableTSDFVolume(
            voxel_length=cfg.tsdf_cubic_size / 512.0,
            sdf_trunc=0.04,
            color_type=o3d.integration.TSDFVolumeColorType.RGB8)

        # first pose is the canonical pose
        pose_base2world = np.loadtxt(frame_paths[0][2], dtype=np.float32)
        pose_world2base = np.linalg.inv(pose_base2world)

        for tup in frame_paths:
            cp, dp, camp = tup

            # current pose to canonical pose
            pose_cam2world = np.loadtxt(camp, dtype=np.float32)
            pose_cam2base = pose_world2base @ pose_cam2world
            # read rgbd image
            rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(\
                read(cp), read(dp), cfg.depth_scale, cfg.depth_trunc, False)
            # fusion to canonical volume
            volume.integrate(rgbd_image, intrinsic,
                             np.linalg.inv(pose_cam2base))

        pc = volume.extract_point_cloud()
        pc.estimate_normals()
        return pc, pose_base2world

    # scene under root folders
    get_folders = lambda s: list(
        filter(lambda f: os.path.isdir(f), glob.glob(s + '/*')))
    color_id = lambda x: parse('frame-{:d}.color.png', os.path.basename(x))[0]
    depth_id = lambda x: parse('frame-{:d}.depth.png', os.path.basename(x))[0]
    pose_id = lambda x: parse('frame-{:d}.pose.txt', os.path.basename(x))[0]

    scenes = get_folders(root_path)
    for s in scenes:
        frag_counter = 0
        scene_name = os.path.basename(s)
        output_dir = os.path.join(output_root, scene_name)
        os.makedirs(output_dir, exist_ok=True)

        # scene camera intrinsics
        intrinsic = read_intrinsic(os.path.join(s, 'camera-intrinsics.txt'),
                                   cfg.width, cfg.height)

        # seq under scene folders
        for seq in get_folders(s):
            if os.path.basename(seq).startswith('seq'):
                # here contain the color, depth, pose images
                cpaths = glob.glob(os.path.join(seq, "*.color.png"))
                dpaths = glob.glob(os.path.join(seq, "*.depth.png"))
                ppaths = glob.glob(os.path.join(seq, "*.pose.txt"))

                cpaths.sort(key=color_id)
                dpaths.sort(key=depth_id)
                ppaths.sort(key=pose_id)

                # sanity check
                assert len(cpaths) == len(dpaths) and len(cpaths) == len(
                    ppaths)
                frame_paths = list(zip(cpaths, dpaths, ppaths))

                # loop over n frames for fusion
                nframes = cfg.frames_per_frag
                head = 0
                tail = min(nframes, len(cpaths))
                while tail <= len(cpaths):
                    print("Processing %d:%d/%d at scene %s..." %
                          (head, tail, len(cpaths), scene_name))
                    pc, pose = FusionFromRGBD(frame_paths[head:tail],
                                              intrinsic, cfg)

                    # ------ debug only ----------------------
                    # points = np.asarray(pc.points)[::10]
                    # points = to_hom_np(points.T).T @ pose.T
                    # points = points[:,:3]
                    # save_ply(os.path.join(output_dir, 'aligned_cloud_bin_%d.ply'%frag_counter), points)
                    # print("Successfully written fused and aligned point cloud #%d for scene %s"%(frag_counter, scene_name))
                    # ----------------------------------------

                    np.savetxt(
                        os.path.join(output_dir,
                                     'cloud_bin_%d_pose.txt' % frag_counter),
                        pose)
                    if o3d.io.write_point_cloud(
                            os.path.join(output_dir,
                                         'cloud_bin_%d.ply' % frag_counter),
                            pc):
                        print(
                            "Successfully written fused point cloud #%d for scene %s"
                            % (frag_counter, scene_name))

                    # update counter
                    frag_counter += 1
                    head = tail
                    tail += nframes

    return None
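The sort keys above rely on {:d} pulling the frame index out of the file name; a tiny sketch with an invented name:

from parse import parse

fname = 'frame-000123.color.png'   # hypothetical file name
print(parse('frame-{:d}.color.png', fname)[0])   # 123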
Example #35
def generate(filename,
             api="",
             kernel_path="",
             script_name=None,
             line_length=False,
             distributed_memory=DISTRIBUTED_MEMORY):
    '''Takes a GungHo algorithm specification as input and outputs the
    associated generated algorithm and psy codes suitable for
    compiling with the specified kernel(s) and GungHo
    infrastructure. Uses the :func:`parse.parse` function to parse the
    algorithm specification, the :class:`psyGen.PSy` class to generate
    the PSy code and the :class:`algGen.Alg` class to generate the
    modified algorithm code.

    :param str filename: The file containing the algorithm specification.
    :param str api: The name of the API to use; if empty, DEFAULTAPI is used.
    :param str kernel_path: The directory from which to recursively
                            search for the files containing the kernel
                            source (if different from the location of the
                            algorithm specification)
    :param str script_name: A script file that can apply optimisations
                            to the PSy layer (can be a path to a file or
                            a filename that relies on the PYTHONPATH to
                            find the module).
    :param bool line_length: A logical flag specifying whether we care
                             about line lengths being longer than 132
                             characters. If so, the input (algorithm
                             and kernel) code is checked to make sure
                             that it conforms. The default is False.
    :param bool distributed_memory: A logical flag specifying whether to
                                    generate distributed memory code. The
                                    default is set in the config.py file.
    :return: The algorithm code and the psy code.
    :rtype: ast
    :raises IOError: if the filename or search path do not exist

    For example:

    >>> from generator import generate
    >>> alg, psy = generate("algspec.f90")
    >>> alg, psy = generate("algspec.f90", kernel_path="src/kernels")
    >>> alg, psy = generate("algspec.f90", script_name="optimise.py")
    >>> alg, psy = generate("algspec.f90", line_length=True)
    >>> alg, psy = generate("algspec.f90", distributed_memory=False)

    '''

    if api == "":
        api = DEFAULTAPI
    else:
        if api not in SUPPORTEDAPIS:
            raise GenerationError(
                "generate: Unsupported API '{0}' specified. Supported "
                "types are {1}.".format(api, SUPPORTEDAPIS))

    if not os.path.isfile(filename):
        raise IOError("file '{0}' not found".format(filename))
    if (len(kernel_path) > 0) and (not os.access(kernel_path, os.R_OK)):
        raise IOError("kernel search path '{0}' not found".format(kernel_path))
    try:
        from algGen import Alg
        ast, invoke_info = parse(filename,
                                 api=api,
                                 invoke_name="invoke",
                                 kernel_path=kernel_path,
                                 line_length=line_length)
        psy = PSyFactory(api, distributed_memory=distributed_memory).\
            create(invoke_info)
        if script_name is not None:
            sys_path_appended = False
            try:
                # a script has been provided
                filepath, filename = os.path.split(script_name)
                if filepath:
                    # a path to a file has been provided
                    # we need to check the file exists
                    if not os.path.isfile(script_name):
                        raise IOError(
                            "script file '{0}' not found".format(script_name))
                    # it exists so we need to add the path to the python
                    # search path
                    sys_path_appended = True
                    sys.path.append(filepath)
                filename, fileext = os.path.splitext(filename)
                if fileext != '.py':
                    raise GenerationError(
                        "generator: expected the script file '{0}' to have "
                        "the '.py' extension".format(filename))
                try:
                    transmod = __import__(filename)
                except ImportError:
                    raise GenerationError(
                        "generator: attempted to import '{0}' but script file "
                        "'{1}' has not been found".format(
                            filename, script_name))
                except SyntaxError:
                    raise GenerationError(
                        "generator: attempted to import '{0}' but script file "
                        "'{1}' is not valid python".format(
                            filename, script_name))
                if callable(getattr(transmod, 'trans', None)):
                    try:
                        psy = transmod.trans(psy)
                    except Exception:
                        exc_type, exc_value, exc_traceback = sys.exc_info()
                        lines = traceback.format_exception(
                            exc_type, exc_value, exc_traceback)
                        e_str = '{\n' +\
                            ''.join('    ' + line for line in lines[2:]) + '}'
                        raise GenerationError(
                            "Generator: script file '{0}'\nraised the "
                            "following exception during execution "
                            "...\n{1}\nPlease check your script".format(
                                script_name, e_str))
                else:
                    raise GenerationError(
                        "generator: attempted to import '{0}' but script file "
                        "'{1}' does not contain a 'trans()' function".format(
                            filename, script_name))
            except Exception as msg:
                if sys_path_appended:
                    os.sys.path.pop()
                raise msg
            if sys_path_appended:
                os.sys.path.pop()
        alg = Alg(ast, psy)
    except Exception:
        raise
    return alg.gen, psy.gen
Example #36
#!/usr/bin/python
"""
Purpose:
    pip install parse
"""

import parse

result = parse.parse("{greeting}, the time is {now:tt}",
                     "Hello, the time is 6:30 PM")
print(result)

print(result.named["greeting"])
Example #37
def _demap_project(item_id):
    result = parse.parse('P{configid}-{pid}', item_id)

    return int(result['pid'])
Example #38
def load(path):
    @with_pattern(r"[a-z-]+")  # match letters and dashes
    def _name_pattern(text):
        return text[:-1]  # remove trailing dash

    return [parse("{name:name}{id:d}[{hash:l}]", line, dict(name=_name_pattern)) for line in open(path).readlines()]
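For context, the standard with_pattern idiom from the parse library, shown here with an invented Number type:

from parse import parse, with_pattern

@with_pattern(r'\d+')
def parse_number(text):
    return int(text)

result = parse('Answer: {number:Number}', 'Answer: 42', dict(Number=parse_number))
print(result['number'])   # 42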
Example #39
from sys import argv, exit
import re
from parse import parse

if len(argv) != 3:
    print('usage: {} <reference.xml> <out-txt-file>'.format(argv[0]))
    exit(1)

plugin = parse(argv[1])

functions = []
variables = []

with open(argv[2], 'w') as ftxt:
    for cmd in plugin.commands:
        if plugin.short_name:
            func = 'sim{}.{}'.format(plugin.short_name, cmd.name)
        else:
            func = '{}{}'.format(plugin.command_prefix, cmd.name)
        functions.append(func)

    for enum in plugin.enums:
        for item in enum.items:
            if plugin.short_name:
                prefix = 'sim{}.{}.'.format(plugin.short_name, enum.name)
            else:
                prefix = enum.item_prefix
            variables.append(prefix + item)

    ftxt.write('{}\n\n{}\n'.format(' '.join(functions), ' '.join(variables)))
Example #40
    def train(self, data):
        """Training function.

        Parameters
        ----------
        data : dict
            Mapping with "train" and "valid" entries holding the training
            and validation data.

        """

        print("Initializing...")
        self.sess.run(tf.global_variables_initializer())
        
        # ----------------------------------------
        # Resume data if it already exists
        latest_checkpoint = tf.train.latest_checkpoint(
            self.res_dir_tr)
        b_resume = latest_checkpoint is not None
        if b_resume:
            # Restore network
            print("Restoring from {}...".format(
                self.res_dir_tr))
            self.saver_cur.restore(
                self.sess,
                latest_checkpoint
            )
            # restore number of steps so far
            step = self.sess.run(self.global_step)
            # restore best validation result
            if os.path.exists(self.va_res_file):
                with open(self.va_res_file, "r") as ifp:
                    dump_res = ifp.read()
                dump_res = parse(
                    "{best_va_res:e}\n", dump_res)
                best_va_res = dump_res["best_va_res"]
            if os.path.exists(self.va_res_file_ours_ransac):
                with open(self.va_res_file_ours_ransac, "r") as ifp:
                    dump_res = ifp.read()
                dump_res = parse(
                    "{best_va_res:e}\n", dump_res)
                best_va_res_ours_ransac = dump_res["best_va_res"]
        else:
            print("Starting from scratch...")
            step = 0
            best_va_res = -1
            best_va_res_ours_ransac = -1

        # ----------------------------------------
        if self.config.data_name.startswith("oan"):
            data_loader = iter(data["train"])
        else: 
            # Unpack some data for simple coding
            xs_tr = data["train"]["xs"]
            ys_tr = data["train"]["ys"]
            Rs_tr = data["train"]["Rs"]
            ts_tr = data["train"]["ts"]
            T1s_tr = data["train"]["T1s"]
            T2s_tr = data["train"]["T2s"]
            K1s_tr = data["train"]["K1s"]
            K2s_tr = data["train"]["K2s"]

        # ----------------------------------------
        # The training loop
        batch_size = self.config.train_batch_size
        max_iter = self.config.train_iter
        
        for step in trange(step, max_iter, ncols=self.config.tqdm_width):
            # ----------------------------------------
            # Batch construction

            # Get a random training batch
            if self.config.data_name.startswith("oan"):
                try:
                    data_dict = next(data_loader)
                except StopIteration:
                    data_loader = iter(data["train"])
                    data_dict = next(data_loader)
            
                xs_b = data_dict["xs"] 
                ys_b = data_dict["ys"]
                Rs_b = data_dict["Rs"].reshape(-1, 9)
                ts_b = data_dict["ts"].reshape(-1, 3)
                T1s_b = data_dict["T1s"] 
                T2s_b = data_dict["T2s"]
                K1s_b = data_dict["K1s"]
                K2s_b = data_dict["K2s"]
            else:
                ind_cur = np.random.choice(
                    len(xs_tr), batch_size, replace=False)
                # Use minimum kp in batch to construct the batch
                numkps = np.array([xs_tr[_i].shape[1] for _i in ind_cur])
                cur_num_kp = numkps.min()
                # Actual construction of the batch
                xs_b = np.array(
                    [xs_tr[_i][:, :cur_num_kp, :] for _i in ind_cur]
                ).reshape(batch_size, 1, cur_num_kp, 4)
                ys_b = np.array(
                    [ys_tr[_i][:cur_num_kp, :] for _i in ind_cur]
                ).reshape(batch_size, cur_num_kp, 2)
                Rs_b = np.array(
                    [Rs_tr[_i] for _i in ind_cur]
                ).reshape(batch_size, 9)
                ts_b = np.array(
                    [ts_tr[_i] for _i in ind_cur]
                ).reshape(batch_size, 3)
                if self.config.use_fundamental > 0:
                    T1s_b = np.array(
                        [T1s_tr[_i] for _i in ind_cur])
                    T2s_b = np.array(
                        [T2s_tr[_i] for _i in ind_cur])
                    K1s_b = np.array(
                        [K1s_tr[_i] for _i in ind_cur])
                    K2s_b = np.array(
                        [K2s_tr[_i] for _i in ind_cur])
            # ----------------------------------------
            # Train

            # Feed Dict
            feed_dict = {
                self.x_in: xs_b,
                self.y_in: ys_b,
                self.R_in: Rs_b,
                self.t_in: ts_b,
                self.is_training: True,
            }

            # add use_fundamental
            if self.config.use_fundamental > 0:
                feed_dict[self.T1_in] = T1s_b
                feed_dict[self.T2_in] = T2s_b
                feed_dict[self.K1_in] = K1s_b
                feed_dict[self.K2_in] = K2s_b

            # Fetch
            fetch = {
                "optim": self.optim,
                "loss": self.loss,
                "precision": self.precision,
                "recall": self.recall,
            }
            # Check if we want to write summary and check validation
            b_write_summary = ((step + 1) % self.config.report_intv) == 0
            b_validate = ((step + 1) % self.config.val_intv) == 0
            if b_write_summary or b_validate:
                fetch["summary"] = self.summary_op
                fetch["global_step"] = self.global_step
            # Run optimization
            # res = self.sess.run(fetch, feed_dict=feed_dict)
            try:
                res = self.sess.run(fetch, feed_dict=feed_dict)
            except (ValueError, tf.errors.InvalidArgumentError):
                print("Backward pass had numerical errors. "
                      "This training batch is skipped!")
                continue
            # Write summary and save current model
            if b_write_summary:
                self.summary_tr.add_summary(
                    res["summary"], global_step=res["global_step"])
                self.saver_cur.save(
                    self.sess, self.save_file_cur,
                    global_step=self.global_step,
                    write_meta_graph=False)

            # ----------------------------------------
            # Validation
            if b_validate:
                va_res = 0
                cur_global_step = res["global_step"]
                score = self.last_logit  # default score: local attention
                if self.config.weight_opt == "sigmoid_softmax":
                    score = [self.last_logit, self.logit_softmax, self.last_weights]

                test_process_ins = [self.x_in, self.y_in, self.R_in, self.t_in, self.is_training] 

                if self.config.use_fundamental > 0:
                    test_process_ins += [self.T1_in, self.T2_in, self.K1_in, self.K2_in]
                    
                va_res, va_res_ours_ransac = test_process(
                    "valid", self.sess, cur_global_step,
                    self.summary_op, self.summary_va,
                    test_process_ins,
                    None, None, None,
                    self.logits, self.e_hat, self.loss, self.precision, self.recall,
                    self.last_e_hat, score, self.last_x_in,
                    data["valid"],
                    self.res_dir_va, self.config, True)
                # Higher the better
                if va_res > best_va_res:
                    print(
                        "Saving best model with va_res = {}".format(
                            va_res))
                    best_va_res = va_res
                    # Save best validation result
                    with open(self.va_res_file, "w") as ofp:
                        ofp.write("{:e}\n".format(best_va_res))
                    # Save best model
                    self.saver_best.save(
                        self.sess, self.save_file_best,
                        write_meta_graph=False,
                    )
                if va_res_ours_ransac > best_va_res_ours_ransac:
                    print(
                        "Saving best model with va_res_ours_ransac = {}".format(
                            va_res_ours_ransac))
                    best_va_res_ours_ransac = va_res_ours_ransac
                    # Save best validation result
                    with open(self.va_res_file_ours_ransac, "w") as ofp:
                        ofp.write("{:e}\n".format(best_va_res_ours_ransac))
                    # Save best model
                    self.saver_best.save(
                        self.sess, self.save_file_best_ours_ransac,
                        write_meta_graph=False,
                    )
Example #41
File: 21.py Project: thran/the_code
# start 8:23, 1. 8:45, 2. 8:55
from collections import defaultdict
from pathlib import Path

from parse import parse


all_ingredients = set()
all_allergens = set()
ingredients_list = []
with Path('input.txt').open() as file:
    for line in file:
        results = parse('{ingredients} (contains {allergens})', line.strip())
        allergens = set(results['allergens'].split(', '))
        ingredients = set(results['ingredients'].split())
        ingredients_list.append((ingredients, allergens))
        all_allergens |= allergens
        all_ingredients |= ingredients
possible_mapping = {ing: set(all_allergens) for ing in all_ingredients}


for ings, als in ingredients_list:
    for ing in all_ingredients - ings:
        possible_mapping[ing] -= als

good_ingredients = {ing for ing, als in possible_mapping.items() if len(als) == 0}
print(sum(1 for ings, _ in ingredients_list for ing in ings if ing in good_ingredients))


bad_ingredients = {ing: als for ing, als in possible_mapping.items() if als}
resolved = {}
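# A plausible completion (editor's sketch; the original snippet is cut off
# here): repeatedly pin down ingredients whose candidate set has shrunk to a
# single allergen, then list the ingredients sorted by their allergen.
while bad_ingredients:
    for ing, als in list(bad_ingredients.items()):
        als -= set(resolved.values())
        if len(als) == 1:
            resolved[ing] = als.pop()
            del bad_ingredients[ing]
print(','.join(sorted(resolved, key=resolved.get)))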
Example #42
0
    def find_handler(self, request_path):
        for path, handler in self.routes.items():
            parse_result = parse.parse(path, request_path)
            if parse_result is not None:
                return handler, parse_result.named
        return None, None
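# For context (editor's note, hypothetical route table): with
# routes = {'/users/{user_id}': user_handler}, a request for '/users/42' is
# matched because parse.parse('/users/{user_id}', '/users/42') succeeds, and
# the handler is returned together with parse_result.named == {'user_id': '42'}.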
Example #43
0
from parse import parse

LOG = '[2018-05-06T12:58:00.714611] - SALE - PRODUCT: 1345 - PRICE: $09.99'
FORMAT = '[{date}] - SALE - PRODUCT: {product} - PRICE: ${price}'

# Run parse and check the results:
result = parse(FORMAT, LOG)
# OUTPUT
# <Result () {'date': '2018-05-06T12:58:00.714611', 'product': '1345', 'price': '09.99'}>

# The results are all strings. Define the types to be parsed:
FORMAT = '[{date:ti}] - SALE - PRODUCT: {product:d} - PRICE: ${price:05.2f}'
result = parse(FORMAT, LOG)
# OUTPUT
# <Result () {'date': datetime.datetime(2018, 5, 6, 12, 58, 0, 714611), 'product': 1345, 'price': 9.99}>

# Define a custom type for the price to avoid issues with the float type:
from decimal import Decimal


def price(string):
    return Decimal(string)


FORMAT = '[{date:ti}] - SALE - PRODUCT: {product:d} - PRICE: ${price:price}'
result = parse(FORMAT, LOG, {'price': price})
print(result)
# <Result () {'date': datetime.datetime(2018, 5, 6, 12, 58, 0, 714611), 'product': 1345, 'price': Decimal('9.99')}>

print(result['date'])
# output : 2018-05-06 12:58:00.714611
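# A possible refinement (editor's note; uses parse's with_pattern helper with
# the same LOG and FORMAT as above): constrain what the custom "price" type is
# allowed to match instead of the default catch-all text pattern.
from parse import with_pattern


@with_pattern(r'\d+\.\d{2}')
def price(string):
    return Decimal(string)


result = parse(FORMAT, LOG, {'price': price})
# result['price'] -> Decimal('9.99')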
Example #44
0
    def parse_line(self, line, i, bpm):
        converted_notes = []
        merge = False
        current_bar_length = 0
        bar_start = False
        for i, note in enumerate(line):
            if note == 'BARSTART\n':
                current_bar_length = 0
                if bar_start:
                    raise SheetParserError('LINE {} : {}'.format(i, note),
                                           'Bar error.')
                bar_start = True
                continue
            if note == 'BAREND\n':
                if not bar_start:
                    raise SheetParserError('LINE {} : {}'.format(i, note),
                                           'Bar error.')
                bar_start = False
                left_length = self.length_of_bar - current_bar_length
                if left_length > 0:
                    converted_notes.append(('rest', left_length, 5))
                    continue
                elif left_length == 0:
                    continue
                else:
                    raise SheetParserError(
                        'LINE {} : {}'.format(i, note),
                        'Bar length exceeds. {:d} < {:d}'.format(
                            self.length_of_bar, current_bar_length))

            # Parse note
            p = parse('({}, {:d}, {:d})', note)
            if p is None:
                p = parse('({}, {:d})', note)
                if p is None:
                    raise SheetParserError('LINE {} : {}'.format(i, note),
                                           'Note format error.')
                else:
                    tone, beat = p
                    volume = self.DEFAULT_VOLUME
            else:
                tone, beat, volume = p

            # If note need to be merged with previous note,
            if merge:
                tone = self.tone_checker(tone, i)
                prev_tone = converted_notes[-1][0]
                if tone != prev_tone:
                    raise SheetParserError(
                        'LINE {} : {}'.format(i, note),
                        'Different tones can\'t be merged.')
                beat_ms = self.beat_checker(beat, bpm, i)
                prev_note = converted_notes[-1]
                converted_notes[-1] = (prev_note[0], prev_note[1] + beat_ms,
                                       prev_note[2])
                merge = False
                current_bar_length += beat_ms

            # If note is 'rest' note,
            elif tone == 'rest':
                beat_ms = self.beat_checker(beat, bpm, i)
                converted_notes.append((tone, beat_ms, volume))
                current_bar_length += beat_ms

            # If note is merge, set merge flag & continue
            elif tone == 'MERGE':
                merge = True
                continue

            elif tone == 'ITER':
                notesnum, iternum = beat, volume
                iter_notes = converted_notes[-notesnum:]
                # Calculate iterlen
                iterlen = 0
                for note in iter_notes:
                    iterlen += note[1]
                for i in range(iternum - 1):
                    iter_notes.extend(iter_notes[0:notesnum])
                converted_notes = converted_notes[:-notesnum] + iter_notes
                current_bar_length += iterlen * (iternum - 1)

            # O.w.
            else:
                beat_ms = self.beat_checker(beat, bpm, i)
                tone = self.tone_checker(tone, i)
                converted_notes.append((tone, beat_ms, volume))
                current_bar_length += beat_ms

        return converted_notes
Example #45
0
def main():
    data = p.parse(p.MY_FILE, ",")

    return create_map(data)
Example #46
0
def convert_from_v1(hub_dict, resolution=128):
    weightname_dict = {
        'weight_u': 'u0',
        'weight_bar': 'weight',
        'bias': 'bias'
    }
    convnum_dict = {'conv0': 'conv1', 'conv1': 'conv2', 'conv_sc': 'conv_sc'}
    attention_blocknum = {128: 3, 256: 4, 512: 3}[resolution]
    hub2me = {
        'linear.weight':
        'shared.weight',  # This is actually the shared weight
        # Linear stuff
        'G_linear.module.weight_bar':
        'linear.weight',
        'G_linear.module.bias':
        'linear.bias',
        'G_linear.module.weight_u':
        'linear.u0',
        # output layer stuff
        'ScaledCrossReplicaBN.weight':
        'output_layer.0.gain',
        'ScaledCrossReplicaBN.bias':
        'output_layer.0.bias',
        'ScaledCrossReplicaBN.running_mean':
        'output_layer.0.stored_mean',
        'ScaledCrossReplicaBN.running_var':
        'output_layer.0.stored_var',
        'colorize.module.weight_bar':
        'output_layer.2.weight',
        'colorize.module.bias':
        'output_layer.2.bias',
        'colorize.module.weight_u':
        'output_layer.2.u0',
        # Attention stuff
        'attention.gamma':
        'blocks.%d.1.gamma' % attention_blocknum,
        'attention.theta.module.weight_u':
        'blocks.%d.1.theta.u0' % attention_blocknum,
        'attention.theta.module.weight_bar':
        'blocks.%d.1.theta.weight' % attention_blocknum,
        'attention.phi.module.weight_u':
        'blocks.%d.1.phi.u0' % attention_blocknum,
        'attention.phi.module.weight_bar':
        'blocks.%d.1.phi.weight' % attention_blocknum,
        'attention.g.module.weight_u':
        'blocks.%d.1.g.u0' % attention_blocknum,
        'attention.g.module.weight_bar':
        'blocks.%d.1.g.weight' % attention_blocknum,
        'attention.o_conv.module.weight_u':
        'blocks.%d.1.o.u0' % attention_blocknum,
        'attention.o_conv.module.weight_bar':
        'blocks.%d.1.o.weight' % attention_blocknum,
    }

    # Loop over the hub dict and build the hub2me map
    for name in hub_dict.keys():
        if 'GBlock' in name:
            if 'HyperBN' not in name:  # it's a conv
                out = parse.parse('GBlock.{:d}.{}.module.{}', name)
                blocknum, convnum, weightname = out
                if weightname not in weightname_dict:
                    continue  # else hyperBN in
                out_name = 'blocks.%d.0.%s.%s' % (
                    blocknum, convnum_dict[convnum],
                    weightname_dict[weightname])  # Increment conv number by 1
            else:  # hyperbn not conv
                BNnum = 2 if 'HyperBN_1' in name else 1
                if 'embed' in name:
                    out = parse.parse('GBlock.{:d}.{}.module.{}', name)
                    blocknum, gamma_or_beta, weightname = out
                    if weightname not in weightname_dict:  # Ignore weight_v
                        continue
                    out_name = 'blocks.%d.0.bn%d.%s.%s' % (
                        blocknum, BNnum, 'gain' if 'gamma' in gamma_or_beta
                        else 'bias', weightname_dict[weightname])
                else:
                    out = parse.parse('GBlock.{:d}.{}.bn.{}', name)
                    blocknum, dummy, mean_or_var = out
                    if 'num_batches_tracked' in mean_or_var:
                        continue
                    out_name = 'blocks.%d.0.bn%d.%s' % (
                        blocknum, BNnum, 'stored_mean'
                        if 'mean' in mean_or_var else 'stored_var')
            hub2me[name] = out_name

    # Invert the hub2me map
    me2hub = {hub2me[item]: item for item in hub2me}
    new_dict = {}
    dimz_dict = {128: 20, 256: 20, 512: 16}
    for item in me2hub:
        # Swap input dim ordering on batchnorm bois to account for my arbitrary change of ordering when concatenating Ys and Zs
        if ('bn' in item and 'weight' in item) and (
                'gain' in item or 'bias' in item) and ('output_layer'
                                                       not in item):
            new_dict[item] = torch.cat([
                hub_dict[me2hub[item]][:, -128:],
                hub_dict[me2hub[item]][:, :dimz_dict[resolution]]
            ], 1)
        # Reshape the first linear weight, bias, and u0
        elif item == 'linear.weight':
            new_dict[item] = hub_dict[me2hub[item]].contiguous().view(
                4, 4, 96 * 16,
                -1).permute(2, 0, 1,
                            3).contiguous().view(-1, dimz_dict[resolution])
        elif item == 'linear.bias':
            new_dict[item] = hub_dict[me2hub[item]].view(
                4, 4, 96 * 16).permute(2, 0, 1).contiguous().view(-1)
        elif item == 'linear.u0':
            new_dict[item] = hub_dict[me2hub[item]].view(
                4, 4, 96 * 16).permute(2, 0, 1).contiguous().view(1, -1)
        elif me2hub[
                item] == 'linear.weight':  # THIS IS THE SHARED WEIGHT NOT THE FIRST LINEAR LAYER
            # Transpose shared weight so that it's an embedding
            new_dict[item] = hub_dict[me2hub[item]].t()
        elif 'weight_u' in me2hub[item]:  # Unsqueeze u0s
            new_dict[item] = hub_dict[me2hub[item]].unsqueeze(0)
        else:
            new_dict[item] = hub_dict[me2hub[item]]
    return new_dict
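# For reference (editor's note; the key name below is hypothetical), this is
# how the GBlock keys are decomposed above:
#
#   import parse
#   blocknum, convnum, weightname = parse.parse(
#       'GBlock.{:d}.{}.module.{}', 'GBlock.3.conv0.module.weight_bar')
#   # blocknum == 3, convnum == 'conv0', weightname == 'weight_bar'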
Example #47
0
        root = None
        for token in doc:
            if token.dep_ == 'ROOT':
                root = token
        if root is None:
            break

        summary_eres.append(dep_parse(root))
    return summary_eres


if __name__ == '__main__':
    import sys
    import parse
    data = parse.parse()

    if len(sys.argv) != 2:
        print(
            "Please run command with desired conversation/summary ID, EX: 'py summ_depparse.py 0'"
        )
    elif not int(sys.argv[1]) in range(0, 45):
        print("Error with given argument")
    else:
        id = int(sys.argv[1])
        for summ in data[id].summaries:
            print("Summary:")
            print(summ)
            eres = parse_summaries(summ)
            print(eres)
            print("\n")
Example #48
0
    else:
        try:
            A=int(columns[3].text)
            Z=int(columns[0].text)
        except ValueError:
            continue
        abundance=columns[4].text.strip()
#        print("This is not a child")
    abundance=abundance.replace(" ", "");
    abundance=abundance.replace(" ", "");
#    if abundance[0]=='[':
#       p=parse("[{:f},{:f}]", abundance)
#       if not p:
#        continue
#       abundance_low=p[0]
#       abundance_high=p[1]
#       print(f"got low {abundance_low:f} and high {abundance_high:f}")
    p=parse("{:g}", abundance)
    if p:
        abundance_single=p[0]
    p=parse("{:f}({d})", abundance)
    if p:
        abundance_single=p[0]
    p=parse("[{:f},{:f}]", abundance)
    if p:
        abundance_low=p[0]
        abundance_high=p[1]
        abundance_single=(p[0]+p[1])/2 #TODO: not the greatest choice?

    print(f"{Z:3d} {A:3d} {abundance_single:5f} {abundance_low:5f} {abundance_high:5f}")
Example #49
0
    return blocks


# read a file, replace stuff, write it out
# only overwrite files if the contents will change
def make_file(f_in, f_out, **args):
    f_tmp = '%s.swp' % f_out
    t = pyratemp.Template(filename=f_in)
    with open(f_tmp, 'w') as out:
        out.write(t(**args).encode())
    if os.path.exists(f_out) and filecmp.cmp(f_tmp, f_out):
        # Nothing changed, don't touch the file.
        os.remove(f_tmp)
        return
    shutil.move(f_tmp, f_out)


devs = []

for config_file in CONFIGS:
    with open(config_file, 'r') as f:
        dev = parse.parse(f)
    cpp_name = 'devices/%s.cpp' % dev.name
    h_name = 'devices/%s.h' % dev.name
    make_file('templates/dev.cpp', cpp_name, dev=dev, blocks=make_blocks(dev))
    make_file('templates/dev.h', h_name, dev=dev, blocks=make_blocks(dev))
    devs.append(dev)

make_file('templates/devices.cpp', 'devices/devices.cpp', devs=devs)
Example #50
0
    def identify(self, filepath, url=None):
        """
        Identifies a file of the layer

        :param filepath: filepath from AMQP
        :param url: fully qualified URL of file

        :returns: `list` of file properties
        """

        super().identify(filepath, url)

        self.model = 'radar'

        LOGGER.debug('Loading model information from store')
        self.file_dict = json.loads(self.store.get_key(self.model))

        filename_pattern = self.file_dict[self.model]['filename_pattern']

        tmp = parse(filename_pattern, os.path.basename(filepath))

        file_pattern_info = {
            'wx_variable': tmp.named['precipitation_type'],
            'time_': tmp.named['YYYYMMDDhhmm'],
        }

        LOGGER.debug('Defining the different file properties')
        self.wx_variable = file_pattern_info['wx_variable']

        if self.wx_variable not in self.file_dict[self.model]['variable']:
            msg = 'Variable "{}" not in configuration file'.format(
                self.wx_variable
            )
            LOGGER.warning(msg)
            return False

        time_format = '%Y%m%d%H%M'
        self.date_ = datetime.strptime(file_pattern_info['time_'], time_format)

        layer_config = self.file_dict[self.model]['variable'][self.wx_variable]
        layer_name = layer_config['geomet_layers']

        member = self.file_dict[self.model]['variable'][self.wx_variable][
            'members'
        ]
        elevation = self.file_dict[self.model]['variable'][self.wx_variable][
            'elevation'
        ]
        str_fh = re.sub('[^0-9]', '', self.date_.strftime(DATE_FORMAT))
        identifier = '{}-{}'.format(layer_name, str_fh)
        date_format = DATE_FORMAT

        feature_dict = {
            'layer_name': layer_name,
            'layer_config': layer_config,
            'filepath': self.filepath,
            'identifier': identifier,
            'reference_datetime': None,
            'forecast_hour_datetime': self.date_.strftime(date_format),
            'member': member,
            'model': self.model,
            'elevation': elevation,
            'expected_count': None,
            'register_status': True,
            'refresh_config': True,
        }
        self.items.append(feature_dict)

        return True
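# Illustration (editor's note): the real filename_pattern is loaded from the
# store and is not shown here, so the pattern and filename below are purely
# hypothetical.
#
#   pattern = '{YYYYMMDDhhmm}_{precipitation_type}.tif'
#   tmp = parse(pattern, '202105061258_RRAI.tif')
#   tmp.named  # {'YYYYMMDDhhmm': '202105061258', 'precipitation_type': 'RRAI'}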
Example #51
0
  attributes = [{'handicapped-infants': 'n', 'export-administration-act-south-africa': 'y', 'superfund-right-to-sue': 'y', 'education-spending': 'y', 'duty-free-exports': 'n', 'aid-to-nicaraguan-contras': 'n', 'immigration': 'y', 'physician-fee-freeze': 'y', 'el-salvador-aid': 'y', 'religious-groups-in-schools': 'y', 'mx-missile': 'n', 'synfuels-corporation-cutback': '?', 'anti-satellite-test-ban': 'n', 'water-project-cost-sharing': 'y', 'crime': 'y', 'adoption-of-the-budget-resolution': 'n', 'Class': 'republican'}, 
                {'handicapped-infants': 'n', 'export-administration-act-south-africa': '?', 'superfund-right-to-sue': 'y', 'education-spending': 'y', 'duty-free-exports': 'n', 'aid-to-nicaraguan-contras': 'n', 'immigration': 'n', 'physician-fee-freeze': 'y', 'el-salvador-aid': 'y', 'religious-groups-in-schools': 'y', 'mx-missile': 'n', 'synfuels-corporation-cutback': 'n', 'anti-satellite-test-ban': 'n', 'water-project-cost-sharing': 'y', 'crime': 'y', 'adoption-of-the-budget-resolution': 'n', 'Class': 'republican'}, 
                {'handicapped-infants': '?', 'export-administration-act-south-africa': 'n', 'superfund-right-to-sue': 'y', 'education-spending': 'n', 'duty-free-exports': 'n', 'aid-to-nicaraguan-contras': 'n', 'immigration': 'n', 'physician-fee-freeze': '?', 'el-salvador-aid': 'y', 'religious-groups-in-schools': 'y', 'mx-missile': 'n', 'synfuels-corporation-cutback': 'y', 'anti-satellite-test-ban': 'n', 'water-project-cost-sharing': 'y', 'crime': 'y', 'adoption-of-the-budget-resolution': 'y', 'Class': 'democrat'}, 
                {'handicapped-infants': 'n', 'export-administration-act-south-africa': 'y', 'superfund-right-to-sue': 'y', 'education-spending': 'n', 'duty-free-exports': 'n', 'aid-to-nicaraguan-contras': 'n', 'immigration': 'n', 'physician-fee-freeze': 'n', 'el-salvador-aid': '?', 'religious-groups-in-schools': 'y', 'mx-missile': 'n', 'synfuels-corporation-cutback': 'y', 'anti-satellite-test-ban': 'n', 'water-project-cost-sharing': 'y', 'crime': 'n', 'adoption-of-the-budget-resolution': 'y', 'Class': 'democrat'}, 
                {'handicapped-infants': 'y', 'export-administration-act-south-africa': 'y', 'superfund-right-to-sue': 'y', 'education-spending': '?', 'duty-free-exports': 'y', 'aid-to-nicaraguan-contras': 'n', 'immigration': 'n', 'physician-fee-freeze': 'n', 'el-salvador-aid': 'y', 'religious-groups-in-schools': 'y', 'mx-missile': 'n', 'synfuels-corporation-cutback': 'y', 'anti-satellite-test-ban': 'n', 'water-project-cost-sharing': 'y', 'crime': 'y', 'adoption-of-the-budget-resolution': 'y', 'Class': 'democrat'}, 
                {'handicapped-infants': 'n', 'export-administration-act-south-africa': 'y', 'superfund-right-to-sue': 'y', 'education-spending': 'n', 'duty-free-exports': 'y', 'aid-to-nicaraguan-contras': 'n', 'immigration': 'n', 'physician-fee-freeze': 'n', 'el-salvador-aid': 'y', 'religious-groups-in-schools': 'y', 'mx-missile': 'n', 'synfuels-corporation-cutback': 'n', 'anti-satellite-test-ban': 'n', 'water-project-cost-sharing': 'y', 'crime': 'y', 'adoption-of-the-budget-resolution': 'y', 'Class': 'democrat'}, 
                {'handicapped-infants': 'n', 'export-administration-act-south-africa': 'y', 'superfund-right-to-sue': '?', 'education-spending': 'n', 'duty-free-exports': 'y', 'aid-to-nicaraguan-contras': 'n', 'immigration': 'n', 'physician-fee-freeze': 'y', 'el-salvador-aid': 'y', 'religious-groups-in-schools': 'y', 'mx-missile': 'n', 'synfuels-corporation-cutback': 'n', 'anti-satellite-test-ban': 'n', 'water-project-cost-sharing': 'y', 'crime': 'y', 'adoption-of-the-budget-resolution': 'y', 'Class': 'democrat'}, 
                {'handicapped-infants': 'n', 'export-administration-act-south-africa': 'y', 'superfund-right-to-sue': 'y', 'education-spending': 'n', 'duty-free-exports': '?', 'aid-to-nicaraguan-contras': 'n', 'immigration': 'n', 'physician-fee-freeze': 'y', 'el-salvador-aid': 'y', 'religious-groups-in-schools': 'y', 'mx-missile': 'n', 'synfuels-corporation-cutback': 'n', 'anti-satellite-test-ban': 'n', 'water-project-cost-sharing': 'y', 'crime': 'y', 'adoption-of-the-budget-resolution': 'n', 'Class': 'republican'}, 
                {'handicapped-infants': 'n', 'export-administration-act-south-africa': 'y', 'superfund-right-to-sue': 'y', 'education-spending': 'y', 'duty-free-exports': 'n', 'aid-to-nicaraguan-contras': 'n', 'immigration': 'n', 'physician-fee-freeze': 'y', 'el-salvador-aid': 'y', 'religious-groups-in-schools': 'y', 'mx-missile': 'n', 'synfuels-corporation-cutback': 'n', 'anti-satellite-test-ban': 'n', 'water-project-cost-sharing': 'y', 'crime': 'y', 'adoption-of-the-budget-resolution': 'n', 'Class': 'republican'}, 
                {'handicapped-infants': 'y', 'export-administration-act-south-africa': '?', 'superfund-right-to-sue': 'n', 'education-spending': 'n', 'duty-free-exports': '?', 'aid-to-nicaraguan-contras': 'n', 'immigration': 'n', 'physician-fee-freeze': 'n', 'el-salvador-aid': 'n', 'religious-groups-in-schools': 'n', 'mx-missile': 'y', 'synfuels-corporation-cutback': 'n', 'anti-satellite-test-ban': 'y', 'water-project-cost-sharing': 'y', 'crime': 'n', 'adoption-of-the-budget-resolution': 'y', 'Class': 'democrat'}, 
                {'handicapped-infants': 'n', 'export-administration-act-south-africa': 'n', 'superfund-right-to-sue': 'y', 'education-spending': '?', 'duty-free-exports': 'n', 'aid-to-nicaraguan-contras': 'n', 'immigration': 'n', 'physician-fee-freeze': 'y', 'el-salvador-aid': 'y', 'religious-groups-in-schools': 'n', 'mx-missile': 'n', 'synfuels-corporation-cutback': '?', 'anti-satellite-test-ban': 'n', 'water-project-cost-sharing': 'y', 'crime': 'y', 'adoption-of-the-budget-resolution': 'n', 'Class': 'republican'}, 
                {'handicapped-infants': 'n', 'export-administration-act-south-africa': '?', 'superfund-right-to-sue': 'y', 'education-spending': '?', 'duty-free-exports': '?', 'aid-to-nicaraguan-contras': 'n', 'immigration': 'n', 'physician-fee-freeze': 'y', 'el-salvador-aid': 'y', 'religious-groups-in-schools': 'y', 'mx-missile': 'n', 'synfuels-corporation-cutback': 'y', 'anti-satellite-test-ban': 'n', 'water-project-cost-sharing': 'y', 'crime': 'y', 'adoption-of-the-budget-resolution': 'n', 'Class': 'republican'}, 
                {'handicapped-infants': 'n', 'export-administration-act-south-africa': '?', 'superfund-right-to-sue': 'y', 'education-spending': 'n', 'duty-free-exports': '?', 'aid-to-nicaraguan-contras': 'n', 'immigration': 'n', 'physician-fee-freeze': 'n', 'el-salvador-aid': 'n', 'religious-groups-in-schools': 'n', 'mx-missile': 'y', 'synfuels-corporation-cutback': 'n', 'anti-satellite-test-ban': 'y', 'water-project-cost-sharing': 'y', 'crime': 'n', 'adoption-of-the-budget-resolution': 'y', 'Class': 'democrat'}, 
                {'handicapped-infants': 'y', 'export-administration-act-south-africa': '?', 'superfund-right-to-sue': 'n', 'education-spending': '?', 'duty-free-exports': 'y', 'aid-to-nicaraguan-contras': 'n', 'immigration': 'y', 'physician-fee-freeze': 'n', 'el-salvador-aid': 'n', 'religious-groups-in-schools': 'y', 'mx-missile': '?', 'synfuels-corporation-cutback': 'y', 'anti-satellite-test-ban': 'y', 'water-project-cost-sharing': 'y', 'crime': 'n', 'adoption-of-the-budget-resolution': 'y', 'Class': 'democrat'}]
  assert(ID3.findBestAttribute(attributes) == 'adoption-of-the-budget-resolution')
  print "Passed test: find_best_attribute_test()\n"



data = parse('house_votes_84.data')


find_best_attribute_test()
a = [1,2,3,4,5,6,7]
random.shuffle(a)
print a
print a[1]
random.shuffle(a)
print a
print a[1]
a[1]
Example #52
0
def _devcheck_manage_snapshots(workdir, recent=5, factor=10, dry=True):
    """
    Sometimes netharn produces too many snapshots. The Monitor class attempts
    to prevent this, but it's not perfect. So, sometimes you need to clean up
    manually. This code snippet serves as a template for doing so.

    I recommend using IPython for this, with the code below as a guide.
    Unfortunately, I don't have a safe automated way of doing this yet.

    The basic code simply lists all snapshots that you have. It's then your job
    to find a heuristic for removing the ones you don't need.

    Note:
        # Idea for more automatic method

        In the future, we should use the monitor to inspect the critical points
        of all metric curves and include any epoch that is at those critical
        points. A critical point is defined as one where there is a significant
        change in trajectory. Basically, we try to fit a low-degree polynomial
        or piecewise linear function to the metric curves, and we take the
        places where there is a significant change from a global perspective.

    # Specify your workdir
    workdir = ub.truepath('~/work/voc_yolo2')
    """

    USE_RANGE_HUERISTIC = True

    run_dpath = join(workdir, 'fit', 'runs')
    snapshot_dpaths = list(glob.glob(join(run_dpath, '**/torch_snapshots')))

    all_keep = []
    all_remove = []

    for snapshot_dpath in snapshot_dpaths:
        snapshots = sorted(glob.glob(join(snapshot_dpath, '_epoch_*.pt')))
        epoch_to_snap = {
            int(parse.parse('{}_epoch_{num:d}.pt', path).named['num']): path
            for path in snapshots
        }
        existing_epochs = sorted(epoch_to_snap.keys())
        # print('existing_epochs = {}'.format(ub.repr2(existing_epochs)))
        toremove = []
        tokeep = []

        if USE_RANGE_HUERISTIC:
            # My criterion is that I'm only going to keep the most recent few
            # epochs, and I'll also keep one epoch per range [0,50), [50,100),
            # [100,150), and so on.
            existing_epochs = sorted(existing_epochs)
            dups = ub.find_duplicates(np.array(sorted(existing_epochs)) // factor, k=0)
            keep_idxs = [max(idxs) for _, idxs in dups.items()]
            keep = set(ub.take(existing_epochs, keep_idxs))

            keep.update(existing_epochs[-recent:])

            if existing_epochs and existing_epochs[0] != 0:
                keep.update(existing_epochs[0:1])

            print('keep = {!r}'.format(sorted(keep)))

            for epoch, path in epoch_to_snap.items():
                if epoch in keep:
                    tokeep.append(path)
                else:
                    toremove.append(path)

        print('Keep {}/{} from {}'.format(len(keep), len(existing_epochs), snapshot_dpath))
        all_keep += [tokeep]
        all_remove += [toremove]

    # print('all_keep = {}'.format(ub.repr2(all_keep, nl=2)))
    # print('all_remove = {}'.format(ub.repr2(all_remove, nl=2)))
    """
    pip install send2trash
    import send2trash
    send2trash.send2trash(path)
    """
    total = 0
    for path in ub.flatten(all_remove):
        total += os.path.getsize(path)

    total_mb = total / 2 ** 20
    if dry:
        print('Cleanup would free {!r} MB'.format(total_mb))
        print('Use -f to confirm and force cleanup')
    else:
        print('About to free {!r} MB'.format(total_mb))
        for path in ub.flatten(all_remove):
            ub.delete(path, verbose=True)
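# Standalone sketch of the range heuristic above (editor's note; no netharn,
# ubelt, or numpy needed): bucket epochs by `epoch // factor`, keep the newest
# epoch per bucket plus the most recent few.
def _select_epochs_to_keep(epochs, recent=5, factor=10):
    epochs = sorted(epochs)
    buckets = {}
    for ep in epochs:
        buckets.setdefault(ep // factor, []).append(ep)
    keep = {eps[-1] for eps in buckets.values()}
    keep.update(epochs[-recent:])
    if epochs and epochs[0] != 0:
        keep.add(epochs[0])
    return sorted(keep)

# e.g. _select_epochs_to_keep(range(100), recent=3, factor=20)
# -> [19, 39, 59, 79, 97, 98, 99]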
Example #53
0
input = open("input.txt").readlines()
from parse import parse

fmt = "position=<{x:6d}, {y:6d}> velocity=<{vx:3d}, {vy:2d}>"

pts = [parse(fmt, l) for l in input]

x = [pt["x"] for pt in pts]
y = [pt["y"] for pt in pts]
vx = [pt["vx"] for pt in pts]
vy = [pt["vy"] for pt in pts]

print(x[0], y[0], vx[0], vy[0])

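# Least-squares estimate of the time at which the points are most clustered:
# the sum of pairwise squared distances is quadratic in t, so setting its
# derivative to zero gives t = -num/den.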
num = 0
den = 0
for i in range(len(pts)):
    for j in range(len(pts)):
        if i == j:
            continue
        num += x[i] * (vx[i] - vx[j]) + x[j] * (vx[j] - vx[i]) + y[i] * (
            vy[i] - vy[j]) + y[j] * (vy[j] - vy[i])
        den += vx[i]**2 + vx[j]**2 - 2 * vx[i] * vx[j] + vy[i]**2 + vy[
            j]**2 - 2 * vy[i] * vy[j]

t_min = round(-num / den)
print(t_min)

result = [[x[i] + vx[i] * t_min, y[i] + vy[i] * t_min] for i in range(len(x))]

print(result[0])
Example #54
0
tn.write(passwd.encode('ascii') + b"\n")

tn.write(b"knvram -a |grep system/admin\n")
tn.write(b"uptime\n")

tn.write(b"killall telnetd\n")

tn.write(b"exit\n")
response = tn.read_all()
#print(response)

token = str(response).split('\\r\\n')
#print (token)

for i in range(len(token)):
    res = parse.parse("system/adminid={}", token[i])
    if (res):
        print("adminid:" + res[0])

    res = parse.parse("system/adminpw={}", token[i])
    if (res):
        print("adminpw:" + res[0])

    res = parse.parse("{} up {} load average: {}", token[i])
    if (res):
        print("up:" + res[1])

#tn.write(b"exit\n")
#print (response)
#print(tn.read_all().decode("ascii"))
Example #55
0
from aocd import data
from parse import parse

a = 16807
b = 48271
d = 2147483647

parsed = parse('Generator A starts with {a0:d}\nGenerator B starts with {b0:d}', data)
a0 = parsed.named['a0']
b0 = parsed.named['b0']

def gen(m, x0):
    x = x0
    while True:
        x = (x * m) % d
        yield x

def gen2(m, x0, d):
    for x in gen(m, x0):
        if x % d == 0:
            yield x

def score(gena, genb, n=40000000):
    x = 0
    for i in range(n):
        a, b = next(gena), next(genb)
        x += a & 0xffff == b & 0xffff
    return x

assert score(gen(a, 65), gen(b, 8921)) == 588
assert score(gen2(a, 65, 4), gen2(b, 8921, 8), n=5000000) == 309
Example #56
0
import os
import re
import json

import requests
import parse

text = requests.get(
    "http://mountainview.gov/depts/comdev/planning/activeprojects/list.asp"
).text
links = re.findall(
    "http://www.mountainview.gov/civicax/filebank/blobdload.aspx\?BlobID=\d+",
    text)

os.system("mkdir -p data")

all_projects = {}
for l in [links[-1]]:
    print("Downloading: " + l)
    pdf_file = "data/" + re.search("BlobID=([\d]+)", l).group(1) + ".pdf"
    os.system("curl -L " + l + " -o " + pdf_file)
    os.system("pdftotext -layout " + pdf_file)
    txt_file = pdf_file.replace(".pdf", ".txt")
    # Clean bad UTF-8 characters
    os.system("iconv -f utf-8 -t utf-8 -c < " + txt_file + " > tmp.txt")
    os.system("mv tmp.txt " + txt_file)
    parsed = parse.parse(txt_file, l)
    for p in parsed:
        all_projects[p["title"]] = p

with open("data/projects.json", "w") as f:
    json.dump(all_projects.values(), f, sort_keys=True, indent=4)
Example #57
0
    def __init__(self,
                 id=None,
                 session=None,
                 _adaptor=None,
                 _adaptor_state={},
                 _ttype=None):
        """
        __init__(id=None, session=None)

        Create / reconnect to a resource.

        :param id: id of the resource
        :type  id: :class:`saga.Url`

        :param session: :class:`saga.Session`

        Resource class instances are usually created by calling :func:`acquire`
        on the :class:`saga.resource.Manager` class.  Already acquired resources
        are identified by a string typed identifier.  This constructor accepts
        such an identifier to create another representation of the same
        resource.  As the resource itself is not newly acquired, it can be in
        any state.  In particular, it can be in a final state, and thus be
        unusable.  Further, the resource may already have expired or failed, and
        the information about it may have been purged -- in that case the id
        will not be valid any longer, and a :class:`saga.BadParameter` exception
        will be raised.

        The session parameter is interpreted exactly as the session parameter on
        the :class:`saga.resource.Manager` constructor.
        """

        # set attribute interface properties

        import radical.saga.attributes as sa

        self._attributes_extensible(False)
        self._attributes_camelcasing(True)

        # register properties with the attribute interface

        self._attributes_register(c.ID, None, sa.ENUM, sa.SCALAR, sa.READONLY)
        self._attributes_register(c.RTYPE, None, sa.ENUM, sa.SCALAR,
                                  sa.READONLY)
        self._attributes_register(c.STATE, None, sa.ENUM, sa.SCALAR,
                                  sa.READONLY)
        self._attributes_register(c.STATE_DETAIL, None, sa.STRING, sa.SCALAR,
                                  sa.READONLY)
        self._attributes_register(c.ACCESS, None, sa.URL, sa.SCALAR,
                                  sa.READONLY)
        self._attributes_register(c.MANAGER, None, sa.URL, sa.SCALAR,
                                  sa.READONLY)
        self._attributes_register(c.DESCRIPTION, None, sa.ANY, sa.SCALAR,
                                  sa.READONLY)

        self._attributes_set_enums(c.STATE, [
            c.UNKNOWN, c.PENDING, c.ACTIVE, c.CANCELED, c.EXPIRED, c.FAILED,
            c.FINAL
        ])

        self._attributes_set_enums(c.RTYPE, [c.COMPUTE, c.STORAGE, c.NETWORK])

        self._attributes_set_getter(c.ID, self.get_id)
        self._attributes_set_getter(c.RTYPE, self.get_rtype)
        self._attributes_set_getter(c.STATE, self.get_state)
        self._attributes_set_getter(c.STATE_DETAIL, self.get_state_detail)
        self._attributes_set_getter(c.ACCESS, self.get_access)
        self._attributes_set_getter(c.MANAGER, self.get_manager)
        self._attributes_set_getter(c.DESCRIPTION, self.get_description)

        # FIXME: we need the ID to be or to include an URL, as we don't have
        # a scheme otherwise, which means we can't select an adaptor.  Duh! :-/

        # FIXME: documentation for attributes is missing.

        # param checks
        scheme = None
        if not id:
            if 'resource_schema' not in _adaptor_state:
                raise se.BadParameter("Cannot initialize resource without id" %
                                      self.rtype)
            else:
                scheme = _adaptor_state['resource_schema']
        else:
            # ID is formatted as '[manager-url]-[resource-id]'
            import parse  # FIXME: use regex to reduce number of dependencies
            res = parse.parse('[{}]-[{}]', id)
            url = ru.Url(res[0])
            scheme = url.scheme.lower()

        if not session:
            session = ss.Session(default=True)

        self._base = super(Resource, self)
        self._base.__init__(scheme,
                            _adaptor,
                            _adaptor_state,
                            id,
                            session,
                            ttype=_ttype)
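# Quick illustration of the '[manager-url]-[resource-id]' id layout parsed in
# __init__ above (editor's note; the id value is hypothetical):
#
#   import parse
#   res = parse.parse('[{}]-[{}]', '[slurm+ssh://cluster.example.org]-[42]')
#   # res[0] == 'slurm+ssh://cluster.example.org', res[1] == '42'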
Example #58
0
def processGraph(filepath, bestPart, logFile):
    statsByTol = {}
    statsByTol["timeMean"] = []
    statsByTol["timeMin"] = []
    statsByTol["coarsenLevel"] = []
    for tolerance in tolerances:
        metricsPath = "metrics/group{}.txt".format(secrets.token_urlsafe(10))
        print("running sgpar on {} with tol {}, comparing to {}, data logged in {}".format(filepath, tolerance, bestPart, metricsPath))

        err = os.system(sysCall.format(filepath, metricsPath, bestPart, tolerance))

        if(err == 256):
            print("error code: {}".format(err))
            print("error produced by:")
            print(sysCall.format(filepath, metricsPath, bestPart, tolerance))
        else:
            totalTimes = []
            coarsenTimes = []
            refineTimes = []
            multilevel = []

            cnt = 0
            with open(metricsPath) as fp:
                for line in fp:
                    parsed = parse(form, line)
                    refineData = parsed[0].split()
                    niters = refineData[0::2]
                    niters.reverse()
                    maxIterReached = refineData[1::2]
                    maxIterReached.reverse()
                    totalTimes.append(float(parsed[1]))
                    coarsenTimes.append(float(parsed[2]))
                    refineTimes.append(float(parsed[3]))
                    multilevelResults = parsed[4].split()
                    multilevelEdgeCuts = multilevelResults[0::2]
                    multilevelEdgeCuts.reverse()
                    multilevelSwaps = multilevelResults[1::2]
                    multilevelSwaps.reverse()

                    data = zip(niters, maxIterReached, multilevelEdgeCuts, multilevelSwaps)

                    dataCount = 0
                    for datum in data:
                        dataCount += 1
                        if len(multilevel) < dataCount:
                            multilevel.append(MultilevelData())
                        multilevel[dataCount-1].niters.append(int(datum[0]))
                        multilevel[dataCount-1].maxIters.append(int(datum[1]))
                        multilevel[dataCount-1].edgeCuts.append(int(datum[2]))
                        multilevel[dataCount-1].swaps.append(int(datum[3]))

                    cnt += 1

            levelCount = 0
            for level in multilevel:
                levelCount += 1
                if len(statsByTol["coarsenLevel"]) < levelCount:
                    levelStats = MultilevelStats()
                    statsByTol["coarsenLevel"].append(levelStats)
                levelStats = statsByTol["coarsenLevel"][levelCount - 1]
                levelStats.refineMean.append(mean(level.niters))
                levelStats.refineMin.append(min(level.niters))
                if len(level.niters) > 1:
                    levelStats.refineStdDev.append(stdev(level.niters))
                else:
                    levelStats.refineStdDev.append(0)
                levelStats.edgeCutMean.append(mean(level.edgeCuts))
                levelStats.edgeCutMin.append(min(level.edgeCuts))
                if len(level.edgeCuts) > 1:
                    levelStats.edgeCutStdDev.append(stdev(level.edgeCuts))
                else:
                    levelStats.edgeCutStdDev.append(0)
                levelStats.swapsMean.append(mean(level.swaps))
                levelStats.swapsMin.append(min(level.swaps))
                if len(level.swaps) > 1:
                    levelStats.swapsStdDev.append(stdev(level.swaps))
                else:
                    levelStats.swapsStdDev.append(0)
                levelStats.maxItersReached.append(sum(level.maxIters))
            statsByTol["timeMean"].append(mean(totalTimes))
            statsByTol["timeMin"].append(min(totalTimes))

    output = open(logFile, "w")
    print("tolerances: {}".format(' '.join(tolerances)), file=output)
    print("mean total time: {}".format(concatStatWithSpace(statsByTol["timeMean"])), file=output)
    print("min total time: {}".format(concatStatWithSpace(statsByTol["timeMin"])), file=output)
    printStat("coarsen level {} mean refine iterations: {}", [level.refineMean for level in statsByTol["coarsenLevel"]], output)
    printStat("coarsen level {} min refine iterations: {}", [level.refineMin for level in statsByTol["coarsenLevel"]], output)
    printStat("coarsen level {} refine iterations std deviation: {}", [level.refineStdDev for level in statsByTol["coarsenLevel"]], output)
    printStat("coarsen level {} times max iter reached: {}", [level.maxItersReached for level in statsByTol["coarsenLevel"]], output)
    printStat("coarsen level {} mean edge cut: {}", [level.edgeCutMean for level in statsByTol["coarsenLevel"]], output)
    printStat("coarsen level {} min edge cut: {}", [level.edgeCutMin for level in statsByTol["coarsenLevel"]], output)
    printStat("coarsen level {} edge cut std deviation: {}", [level.edgeCutStdDev for level in statsByTol["coarsenLevel"]], output)
    printStat("coarsen level {} mean swaps to best partition: {}", [level.swapsMean for level in statsByTol["coarsenLevel"]], output)
    printStat("coarsen level {} min swaps to best partition: {}", [level.swapsMin for level in statsByTol["coarsenLevel"]], output)
    printStat("coarsen level {} swaps to best partition std deviation: {}", [level.swapsStdDev for level in statsByTol["coarsenLevel"]], output)
    print("end {} processing".format(filepath))
Example #59
0
    def run(self):
        while True:
            request = self.socket.recv_string()
            # download filename
            if request.startswith("download"):
                filename = str(parse.parse("download {}", request)[0])
                keeper_data = self.videos.get(filename)
                if keeper_data is None:
                    self.socket.send_json(None)
                    continue
                keeper_ips = keeper_data[0]
                file_size = self.videos[filename][3]
                ips = []
                ports = []
                for ip in keeper_ips:
                    if not self.keepers[ip][-1]:
                        continue
                    for port, busy in self.keepers[ip][0].items():
                        if not busy:
                            ips.append(ip)
                            ports.append(port)
                            set_busy(self.keepers, self.lk, ip, port, True)
                            break
                self.socket.send_json((ips, ports, file_size))

            elif request.startswith("upload"):
                keepers_used_storage = {}
                for ip in self.keepers.keys():
                    if not self.keepers[ip][-1]:
                        continue
                    keepers_used_storage[ip] = 0
                for file, data in self.videos.items():
                    for ip in data[0]:
                        if not self.keepers[ip][-1]:
                            continue
                        keepers_used_storage[ip] += data[3]
                keepers_ips = [
                    ip for ip, used_storage in sorted(
                        keepers_used_storage.items(), key=lambda item: item[1])
                ]
                chosen_ip = None
                chosen_port = None
                for ip in keepers_ips:
                    if not self.keepers[ip][-1]:
                        continue
                    for port, busy in self.keepers[ip][0].items():
                        if not busy:
                            chosen_ip = ip
                            chosen_port = port
                            break
                    if chosen_ip is not None:
                        break
                if chosen_ip is not None:
                    self.socket.send_json((chosen_ip, chosen_port))
                    set_busy(self.keepers, self.lk, chosen_ip, chosen_port,
                             True)
                else:
                    self.socket.send_json(None)

            elif request.startswith("successfully_uploaded"):
                parsed = parse.parse(
                    "successfully_uploaded: {} {} {} {} {} {}", request)
                filename = str(parsed[0])
                data_keeper_ip = parsed[1]
                data_keeper_port = int(parsed[2])
                user_id = int(parsed[3])
                file_path = str(parsed[4])
                size = int(parsed[5])
                add_video(self.videos, self.lv, filename,
                          {data_keeper_ip: True}, user_id, file_path, size)
                set_busy(self.keepers, self.lk, data_keeper_ip,
                         data_keeper_port, False)
                self.socket.send_string("OK")
                print('User: {} uploaded "{}" successfully to {}:{}'.format(
                    user_id, filename, data_keeper_ip, data_keeper_port))

            elif request.startswith("successfully_downloaded"):
                parsed = parse.parse("successfully_downloaded: {} {}", request)
                data_keeper_ip = parsed[0]
                data_keeper_port = int(parsed[1])
                set_busy(self.keepers, self.lk, data_keeper_ip,
                         data_keeper_port, False)
                self.socket.send_string("OK")
Example #60
0
import os
import sys
import glob
import pickle as pkl

from parse import parse
from gm_hmm.src.genHMM import GenHMMclassifier, save_model
from gm_hmm.src.ref_hmm import GaussianHMMclassifier

if __name__ == "__main__":
    usage = "Aggregate models from several classes.\n" \
            "Usage: python bin/aggregate_models.py models/epoch1.mdl"

    if len(sys.argv) != 2 or sys.argv[1] == "-h" or sys.argv[1] == "--help":
        print(usage)
        sys.exit(1)

    out_mdl_file = sys.argv[1]

    # Find the class digit
    get_sort_key = lambda x: parse("{}class{:d}.mdlc", x)[1]
    # find the model used, 'gen' or 'gaus'
    model_type = parse("{}/exp/{}/{:d}feats/{}", os.getcwd())[1]
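    # For illustration (editor's note; the paths below are hypothetical):
    #   parse("{}class{:d}.mdlc", "models/epoch1_class3.mdlc")[1]          -> 3
    #   parse("{}/exp/{}/{:d}feats/{}", "/home/u/exp/gen/39feats/run")[1]  -> 'gen'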

    # Find all trained classes submodels
    in_mdlc_files = sorted(glob.glob(
        out_mdl_file.replace(".mdl", "_class*.mdlc")),
                           key=get_sort_key)
    if model_type == 'gaus':
        mdl = GaussianHMMclassifier(mdlc_files=in_mdlc_files)
        assert (all(
            [int(h.iclass) == int(i) + 1 for i, h in enumerate(mdl.hmms)]))
        with open(out_mdl_file, "wb") as handle:
            pkl.dump(mdl, handle)

    elif model_type == 'gen':