Example #1
File: gui.py Project: cilsat/slurp
def test():
    log = lambda message: sys.stdout.write(message)
    writer = Writer('D:/exe/imod/IMOD_USER/pusair-output')

    try:
        log('Reading config... ')
        config.parse()
        if config.config['gradient'] > 1:
            raise ValueError('Maximum gradient is 1')
        log('Done\n')

        p, adj = slurp.prep(
            # fbore=str('D:/exe/imod/IMOD_USER/pusair-input/Boreholes_Dimas.ipf'),
            fbore=str('D:/exe/imod/IMOD_USER/pusair-input/Boreholes_Jakarta.ipf'),
            fscreen=str('data/well_M_z_all.ipf'),
            config=config,
            log=log)

        interpolator = Interpolator(p, adj, writer, log)
        interpolator.interpolate()

        log('\n[DONE]')
    except Exception as e:
        log('\n\n[ERROR] {}'.format(e))
        traceback.print_exc()

    writer.reset()
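Most of the slurp examples assume a `config` module exposing a module-level `parse()` plus a `config` dict (note the `config.config['gradient']` lookup above). A minimal sketch of that interface, assuming a JSON config file (the real module may differ):

# Hypothetical sketch of the config module the slurp examples rely on:
# parse() fills a module-level dict that callers read as config.config[...].
import json

config = {}

def parse(path='config.json'):
    # update in place so references to config.config stay valid after parsing
    with open(path) as fh:
        config.update(json.load(fh))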
Example #2
File: gui.py Project: cilsat/slurp
    def run(self):
        log = lambda message: self.emit(SIGNAL('logging(QString)'), message)
        writer = Writer(str(self.text_output))

        try:
            log('Reading config... ')
            config.parse()
            if config.config['gradient'] > 1:
                raise ValueError('Maximum gradient is 1')
            log('Done\n')

            p, adj = slurp.prep(
                fbore=str(self.text_input),
                fscreen=str(self.text_screen),
                config=config,
                log=log)

            interpolator = Interpolator(p, adj, writer, log)
            interpolator.interpolate()

            log('\n[DONE]')
        except Exception as e:
            log('\n\n[ERROR] {}'.format(e))
            traceback.print_exc()

        self.emit(SIGNAL('finish_program()'))
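Example #2's `run()` belongs to a PyQt4 worker thread using old-style signals; a minimal, self-contained sketch of how a caller might wire those signals up (the `Worker` stand-in here is hypothetical):

# Hypothetical wiring for the old-style PyQt4 signals emitted by run() above.
import sys
from PyQt4.QtCore import QCoreApplication, QThread, SIGNAL

class Worker(QThread):
    # stand-in for the thread class defining run() in example #2
    def run(self):
        self.emit(SIGNAL('logging(QString)'), 'hello from the worker\n')
        self.emit(SIGNAL('finish_program()'))

app = QCoreApplication(sys.argv)
worker = Worker()
worker.connect(worker, SIGNAL('logging(QString)'), lambda m: sys.stdout.write(str(m)))
worker.connect(worker, SIGNAL('finish_program()'), app.quit)
worker.start()
app.exec_()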
Example #3
def main():
    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument('-c', dest='config')
    parser.add_argument('-f', dest='test_file')
    parser.add_argument('-e', dest='epoch', type=int)
    args = parser.parse_args()
    config.parse(args.config)
    config.cuda = False
    vocab = load_vocabulary()
    model = build_model(len(vocab.word2index),
                        load_ckpt=True,
                        ckpt_epoch=args.epoch)
    config.use_cuda = False
    model.cpu()
    bot = BotAgent(model, vocab)
    if args.test_file is not None:
        with open(args.test_file) as file:
            question_list = []
            for line in file:
                question_list.append(line[:-1])
            for question in question_list:
                print('> %s' % question)
                print('bot: %s' % bot.response(question))
    else:
        while True:
            user_input = input('me: ')
            if user_input.strip() == '':
                continue
            print('%s: %s' % ('bot', bot.response(user_input)))
Example #4
def main(host, port):
    config.parse()
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.bind((host, port))
    server.listen(64)

    while True:
        (client, address) = server.accept()
        handle(client)
        client.close()
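`handle(client)` is defined elsewhere in that project; a hypothetical placeholder that simply echoes the client's data back:

# Hypothetical stand-in for the handle() called above: echo one request back.
def handle(client):
    data = client.recv(4096)
    if data:
        client.sendall(data)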
Example #5
File: main.py Project: balamark/CollabMeet
def main():  
  if len(sys.argv) != 5: 
    print '<usage> name ip port videoport'
    sys.exit(1)
  
  instance.name = sys.argv[1]
  instance.local_ip = sys.argv[2]
  instance.listen_port = int(sys.argv[3])
  instance.video_port = int(sys.argv[4])
  
  config.parse()
  
  if instance.curr_master == instance.name:
    master.init_master()
  else:
    network.join_meeting()
  
  instance.initialized = True
  
  api.init_gui()
  print "### done init_gui() ###"
  api.init_video_module()
  
  instance.last_heartbeat_rcvd = time.time()
  instance.master_thread = master.MasterThread()
  
  print "### 1 ###"
  if instance.curr_video_port != 0:
    api.connect_to_video_server(instance.curr_video_name, instance.curr_video_ip, instance.curr_video_port)
  
  s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
  network.ListeningThread(s)
  network.ConnectingThread()

  print "### 2 ###"
  print "### I am up ###" 
  while 1:
    try:
      command = raw_input(">>>")
      if len(command) == 0:
        continue
      commands = command.split(':')
      if commands[0] == 'q':
        instance.has_exited = True
        network.close_connections()
        sys.exit(0)
      elif commands[0] == 'text':
        api.send_text_msg(commands[1])
      elif commands[0] == 'video':
        api.send_video_req()
      else:
        print 'This command is not supported'
    except KeyboardInterrupt:
      sys.exit(1)
Example #6
File: sync.py Project: mor1/py-perscon
def main():
    uri = "http://localhost:5985/"
    Perscon_utils.init_url (uri)

    configfile = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                            "..", "..", "perscon", "perscon.conf")
    config.parse(configfile)
    base = config.get('photofiles', 'base')
    print "base dir is %s" % base

    for root, dirs, files in os.walk(base):
      for f in files:
        skip = False
        fname = os.path.join(root, f)
        meta = {}
        root_name,ext = os.path.splitext(fname)
        fin = open(fname, 'rb')
        try:
          print "reading %s" % fname
          data = fin.read()
          fin.seek(0)
          exif_tags = EXIF.process_file(fin)
        except:
          print >> sys.stderr, "error reading: %s" % fname
          skip = True
        finally:
          fin.close()
        if skip or (exif_tags == {}):
          print "skipping"
          continue
        if exif_tags.has_key('EXIF DateTimeOriginal'):
          raw = str(exif_tags['EXIF DateTimeOriginal'])
          tm = dateutil.parser.parse(raw)
          tt = tm.timetuple()
        else:
          tt = datetime.fromtimestamp(os.path.getmtime(fname)).timetuple()
        tstamp = time.mktime(tt)
        guid = hashlib.md5(file(fname).read()).hexdigest()
        uid = guid + ext
        m = { 'type':'org.perscon.photofiles', 'mtime':tstamp, 'att': [uid], 'uid': guid, 'frm': [], 'to':[] }
#        rpath = relpath(root,base)
        print base
        print fname
        m['caption'] = os.path.join(base, os.path.basename(fname))
        mime,mime_enc = mimetypes.guess_type(fname)
        Perscon_utils.rpc('att/'+uid, headers={'content-type': mime,'content-length': len(data)}, data=data)
        meta['file_path'] = fname
        m['meta'] = meta
        mj = simplejson.dumps(m, indent=2)
        print mj
        Perscon_utils.rpc('thing/' + uid, data=mj)
Example #7
 def setUpClass(cls):
     config_dict      = config.parse()
     cls.USER_ID      = config_dict['user_id']
     cls.SNIPPET_ID   = config_dict['snippet_id']
     cls.API_URL      = config_dict['url'] + '/users'
     cls.ACCESS_TOKEN = config_dict['access_token']
     cls.SNIPPET      = json.dumps(config_dict['snippet'])
Example #8
def main():
  import argparse
  parser = argparse.ArgumentParser(description='rCk')
  parser.add_argument('--config', metavar='PATH', type=str, help='path to configuration file', default='/etc/rcheck/deploy.yml')
  parser.add_argument('--tag-prefix', metavar='PREFIX', type=str, help='prefix of the tag name', default='v')
  args = parser.parse_args()

  if not os.path.isfile(args.config):
    print('File "{0}" does not exist.'.format(args.config))
    exit(1)

  yamltree = {}
  with open(args.config, 'r') as stream:
    yamltree = yaml.load(stream)
  config.verify(yamltree)
  cfg = config.parse(yamltree)

  for environment in cfg.environments:
    print('Initializing environment "{0}" at "{1}"'.format(environment.name, environment.path))
    repo = git.try_clone(environment.path, cfg.giturl)
    repo.fetch()
    tags = repo.tag_list()
    spec = semver.Spec(environment.ref)
    last_tag_to_match = ''
    for tag in tags:
      version = semver.Version(tag)
      if spec.match(version):
        last_tag_to_match = tag
    if last_tag_to_match == '':
      print('- No tag match for ref spec "{0}"'.format(environment.ref))
    else:
      print('- Checkout tag {0} for environment {1}'.format(last_tag_to_match, environment.name))
      repo.checkout(last_tag_to_match)
Example #9
def handler(environ, start_response):
    prefix = environ.get('PATH_PREFIX', None)
    uri = environ['PATH_INFO']
    if prefix and uri.startswith(prefix):
        uri = uri[len(prefix):]
    match = canteen_request.match(uri)
    if not match:
        start_response("404 Wrong Path", [("Content-type", 'application/xml; charset=utf-8')])
        return ['<xml version="1.0"><info>{provider}/{canteen}/{feed}.xml</info></xml>']
    request = utils.Request(environ)
    try:
        content = parse(request, *(match.group('dirs').split('/') + [match.group('file')]))
        content = content.encode('utf8')
        start_response('200 OK', [('Content-Type', 'application/xml; charset=utf-8'),
                                  ('Content-Length', str(len(content)))])
        return (content,)
    except utils.Redirect as e:
        start_response('301 Permanent Redirect', [('Location', e.location)])
        return ('',)
    except utils.ParserNotFound as e:
        start_response('404 Parser not found', [('Content-Type', 'text/plain; charset=utf-8')])
        return (e.reason,)
    except utils.SourceNotFound as e:
        start_response('404 Source not found', [('Content-Type', 'text/plain; charset=utf-8')])
        return (e.reason,)
    except utils.FeedNotFound as e:
        start_response('404 Feed not found', [('Content-Type', 'text/plain; charset=utf-8')])
        return (e.reason,)
    except utils.NotFoundError as e:
        start_response('404 Unknown file format', [('Content-Type', 'text/plain; charset=utf-8')])
        return (e.reason,)
    except Exception:
        traceback.print_exception(*sys.exc_info())
        start_response('500 Internal Server Error', [])
        return ('', )
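Since `handler` is a plain WSGI application, it can be served directly with the standard library; a minimal sketch (the port is arbitrary):

# Minimal sketch: serve the WSGI handler above using wsgiref.
from wsgiref.simple_server import make_server

if __name__ == '__main__':
    httpd = make_server('', 8080, handler)  # port chosen arbitrarily
    httpd.serve_forever()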
Example #10
def handler(environ, start_response):
    prefix = environ.get('PATH_PREFIX', None)
    uri = environ['PATH_INFO']
    if prefix and uri.startswith(prefix):
        uri = uri[len(prefix):]
    match = canteen_request.match(uri)
    if not match:
        start_response("404 Wrong Path", [("Content-type", 'application/xml; charset=utf-8')])
        return ['<xml version="1.0"><info>{provider}/{canteen}.xml</info></xml>']
    elif match.group('provider') not in providers:
        start_response('404 Provider not found', [])
    elif match.group('canteen') not in providers[match.group('provider')]['canteens']:
        start_response('404 Canteen not found', [])
    else:
        try:
            content = parse(match.group('provider'), match.group('canteen'),
                            bool(match.group('today')))
        except Exception:
            traceback.print_exception(*sys.exc_info())
            start_response('500 Internal Server Error', [])
            return
        content = content.encode('utf8')
        start_response('200 OK', [('Content-Type', 'application/xml; charset=utf-8'),
                                  ('Content-length', str(len(content)))])
        return (content,)
Example #11
File: upgrade.py Project: thwarted/swup
 def __init__(self, configdict=None):
     "Set up configuration and run initialization functions."
     if not configdict:
         import config
         self.config = config.parse()
     else:
         self.config = configdict
     self.init()
Example #12
 def __init__(self, configdict=None):
     "Set up configuration and run initialization functions."
     if not configdict:
         import config
         self.config = config.parse()
     else:
         self.config = configdict
     self.init()
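The `configdict` parameter in examples #11 and #12 lets callers inject configuration instead of reading the config file, which is handy in tests; a brief usage sketch (the class name `Upgrade` is hypothetical):

# Hypothetical class showing the configdict-or-parse pattern from above.
class Upgrade(object):
    def __init__(self, configdict=None):
        if not configdict:
            import config
            self.config = config.parse()
        else:
            self.config = configdict
        self.init()

    def init(self):
        # placeholder for the project's real initialization functions
        pass

# production code reads the config file; tests inject configuration directly
test_upgrader = Upgrade({'dry_run': True})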
Example #13
def main() -> None:
    conf = config.parse(sys.argv)
    slots = cgroup.SlotManager(conf.limit)
    if not slots.available() and conf.limit.any_set():
        print("W: limits requested but cgroups are not available",
              file=sys.stderr,
              flush=True)
    start_web(conf, slots)
Example #14
File: sync.py Project: avsm/perscon
def main(argv = None):
  """ main entry point """

  configfile = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                            "..", "..", "perscon", "perscon.conf")
  config.parse(configfile)
  service = "google.com"
  username = config.user(service)
  password = keyring.get_password(service, username)

  gd_client = gdata.photos.service.PhotosService()
  gd_client.email = username
  gd_client.password = password
  gd_client.source = 'py-perscon-v01'
  gd_client.ProgrammaticLogin()

  uri = "http://localhost:5985/"
  Perscon_utils.init_url(uri)

  #####
  # Get pictures from Picasa
  #####

  albums = gd_client.GetUserFeed(user=username)
  # iterate over albums
  for album in albums.entry:
    print 'title: %s, number of photos: %s, id: %s' % (album.title.text,
      album.numphotos.text, album.gphoto_id.text)
    album_id = album.gphoto_id.text
    # iterate over pictures
    photos = gd_client.GetFeed('/data/feed/api/user/%s/albumid/%s?kind=photo' % 
      (username, album_id))
    for photo in photos.entry:
      print 'Photo title:', photo.title.text
      image_url = photo.content.src
      uid = photo.gphoto_id.text
      mime,mime_enc = mimetypes.guess_type(photo.content.src)
      if not mime:
         mime = 'application/octet-stream'
      fin = urllib2.urlopen(image_url)
      data = fin.read()
      fin.close()
      Perscon_utils.rpc('att/'+uid, headers={'Content-type':mime,'Content-length':len(data)}, data=data)
      tstamp = photo.timestamp.text
      m = {'origin':'com.google.picasa', 'mtime':tstamp, 'att': [uid], 'uid': uid, 'tags':[] }
      meta={}
Example #15
def main():
    # parse arguments
    config.parse()
    args = config.args
    for k, v in vars(args).items():
        logger.info(f"{k}:{v}")

    # check arguments and pick the device to use
    device, n_gpu = args_check(args)
    os.makedirs(args.output_dir, exist_ok=True)
    forward_batch_size = int(args.train_batch_size /
                             args.gradient_accumulation_steps)
    args.forward_batch_size = forward_batch_size

    # load the student config file and check that our max sequence length fits its position embeddings
    bert_config_S = BertConfig.from_json_file(args.bert_config_file_S)
    assert args.max_seq_length <= bert_config_S.max_position_embeddings

    # prepare the task
    processor = processors[args.task_name]()
    args.output_mode = output_modes[args.task_name]
    # all labels
    label_list = processor.get_labels()
    num_labels = len(label_list)

    # read data
    tokenizer = BertTokenizer(vocab_file=args.vocab_file,
                              do_lower_case=args.do_lower_case)

    eval_dataset = load_and_cache_examples(args,
                                           args.task_name,
                                           tokenizer,
                                           evaluate=True)
    logger.info("评估数据集已加载")

    model_S = BertSPCSimple(bert_config_S, num_labels=num_labels, args=args)
    # load the student model
    assert args.tuned_checkpoint_S is not None
    state_dict_S = torch.load(args.tuned_checkpoint_S, map_location='cpu')
    model_S.load_state_dict(state_dict_S)
    logger.info("Student模型已加载")

    # run prediction
    res = predict(model_S, eval_dataset, args=args)
    print(res)
Example #16
def main():
  """Runs the pipsource command utility."""
  args = parser.parse_args()

  reqs = requirements.parse(os.path.expanduser(args.requirements_file))
  configs = config.parse(os.path.expanduser(args.config))

  if args.command == 'vendor':
    _run_vendor(reqs, configs)
Example #17
 def setUpClass(cls):
     config_dict = config.parse()
     cls.USER_ID = config_dict['user_id']
     cls.SNIPPET_ID = config_dict['snippet_id']
     cls.API_URL = config_dict[
         'url'] + '/users/' + cls.USER_ID + '/snippets/' + cls.SNIPPET_ID
     cls.INVALID_SNIPPET_KEY = config_dict['invalid_snippet_key']
     cls.INVALID_SNIPPET_URL = '%s/users/%s/snippets/%s' % (
         config_dict['url'], cls.USER_ID, cls.INVALID_SNIPPET_KEY)
     cls.ACCESS_TOKEN = config_dict['access_token']
     cls.SNIPPET = json.dumps(config_dict['snippet'])
Example #18
File: sync.py Project: mor1/py-perscon
def main(argv = None):
    """ main entry point """

    configfile = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                              "..", "..", "perscon", "perscon.conf")
    config.parse(configfile)
    service = "google.com"
    username = config.user(service)
    password = keyring.get_password(service, username)

    gd_client = gdata.docs.service.DocsService(source='py-perscon-v01')
    gd_client.ClientLogin(username, password)

    uri = "http://localhost:5985/"
    Perscon_utils.init_url (uri)

    feed = gd_client.GetDocumentListFeed()
    if not feed.entry:
      print 'No items found.\n'
    for entry in feed.entry:
      parseObject(entry, gd_client)
Example #19
File: test_config.py Project: mvdnes/mdms
 def test_ok(self):
     v = config.parse("tests/config_ok.toml")
     expected = {
         'database': {
             'type': 'sqlite',
             'sqlite': {'file': 'dbmdms.db'},
         },
         'filesystem': {
             'location': 'data'
         },
     }
     self.assertEqual(v, expected)
Example #20
def main():
    args = run_argparse()
    configure_log(args.log)
    random.seed(args.seed)

    dir_path = pathlib.Path(args.dir)
    dom = domain.parse(dir_path)
    default_confs = config.parse(dir_path)

    if args.all:
        do_all(dom, dir_path)
    else:
        do_iter(dom, dir_path, args.iter)
Example #21
 def runTest(self):
     SunnyDayTest.runTest(self)
     cfg = os.path.join(ACCUMULO_HOME, 'conf', SITE)
     import config
     dir = config.parse(cfg)['instance.dfs.dir']
     handle = self.runOn(self.masterHost(),
                         [self.accumulo_sh(),
                          'org.apache.accumulo.core.file.rfile.PrintInfo',
                          dir + '/tables/1/default_tablet/F0000000.rf'])
     out, err = handle.communicate()
     self.assert_(handle.returncode == 0)
     self.assert_(out.find('Locality group         : g1') >= 0)
     self.assert_(out.find('families      : [colf]') >= 0)
Example #22
def test():
    import sys
    import slurp
    import config
    from writer import Writer

    config.parse()

    w, p = slurp.getBores(soilmap=config.config['soil'])
    p.dropna(inplace=True)
    p['rh'] = p['r']*config.config['buffersize']  # r horizontal

    # set minimum r horizontal
    rh_min = 1.6*config.config['cellsize']
    p.loc[p['rh'] < rh_min, 'rh'] = rh_min
    adj = slurp.getGroupies(p, config.config['gradient'], config.config['buffersize'])

    writer = Writer('data/sampah')
    log = lambda message: sys.stdout.write(message)
    interpolator = Interpolator(p, adj, writer, log)

    return interpolator.interpolate()
Example #23
 def test_ok(self):
     v = config.parse("tests/config_ok.toml")
     expected = {
         'database': {
             'type': 'sqlite',
             'sqlite': {
                 'file': 'dbmdms.db'
             },
         },
         'filesystem': {
             'location': 'data'
         },
     }
     self.assertEqual(v, expected)
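Examples #19 and #23 expect `config.parse()` to return the TOML file's contents as nested dicts; a minimal sketch of such a function, assuming the third-party `toml` package (the actual mvdnes/mdms implementation may differ):

# Minimal sketch of a TOML-backed config.parse(), assuming the toml package.
import toml

def parse(path):
    with open(path) as fh:
        return toml.load(fh)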
Example #24
 def runTest(self):
     SunnyDayTest.runTest(self)
     cfg = os.path.join(ACCUMULO_HOME, 'conf', SITE)
     import config
     dir = config.parse(cfg)['instance.dfs.dir']
     handle = self.runOn(self.masterHost(), [
         self.accumulo_sh(),
         'org.apache.accumulo.core.file.rfile.PrintInfo',
         dir + '/tables/1/default_tablet/F0000000.rf'
     ])
     out, err = handle.communicate()
     self.assert_(handle.returncode == 0)
     self.assert_(out.find('Locality group         : g1') >= 0)
     self.assert_(out.find('families      : [colf]') >= 0)
Example #25
def generate():
    confDict = config.parse("./conf.ini")
    parser = xml.sax.make_parser()
    parser.setFeature(xml.sax.handler.feature_namespaces, 0)
    generator = CodeGenerator.CodeGenerator(confDict["xml_path"], confDict["java_code"], confDict["java_package"])
    for xmlFile in confDict["xml_files"]:
        xmlHandler = XmlHandler()
        parser.setContentHandler(xmlHandler)
        parser.parse(os.path.join(confDict["xml_path"], xmlFile))
        dataTypes = xmlHandler.getDataTypes()
        if dataTypes:
            xmlFileName = os.path.splitext(xmlFile)[0]
            generator.generateEntityFile(xmlFileName, dataTypes)
            generator.generateConfigFile(xmlFileName)
            generator.generateParserFile(xmlFileName)
Example #26
def main():
    global loop
    global monitor

    config.parse()
    mod_drbd.load()

    if config.options.daemonize:
        daemonize()

    create_pidfile()

    config.show()
    mod_plugins.show()
    mod_drbd.show()

    mod_plugins.loadQuorum(config.quorum_plugin)
    mod_plugins.loadSwitcher(config.switcher_plugin)
    mod_plugins.loadNotifier(config.notifier_plugin)

    print

    signal.signal(signal.SIGINT,  signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    listener = mod_listener.Listener()
    pinger   = mod_pinger.Pinger()
    monitor  = mod_monitor.Monitor(listener, pinger)

    listener.setMonitor(monitor)
    pinger.setMonitor(monitor)

    monitor.start()

    while loop:
        time.sleep(.1)
Example #27
def handler(environ, start_response):
    prefix = environ.get('PATH_PREFIX', None)
    uri = environ['PATH_INFO']
    if prefix and uri.startswith(prefix):
        uri = uri[len(prefix):]
    match = canteen_request.match(uri)
    if not match:
        start_response("404 Wrong Path",
                       [("Content-type", 'application/xml; charset=utf-8')])
        return [
            '<xml version="1.0"><info>{provider}/{canteen}/{feed}.xml</info></xml>'
        ]
    request = utils.Request(environ)
    try:
        content = parse(
            request, *(match.group('dirs').split('/') + [match.group('file')]))
        content = content.encode('utf8')
        start_response('200 OK',
                       [('Content-Type', 'application/xml; charset=utf-8'),
                        ('Content-Length', str(len(content)))])
        return (content, )
    except utils.Redirect as e:
        start_response('301 Permanent Redirect', [('Location', e.location)])
        return ('', )
    except utils.ParserNotFound as e:
        start_response('404 Parser not found',
                       [('Content-Type', 'text/plain; charset=utf-8')])
        return (e.reason, )
    except utils.SourceNotFound as e:
        start_response('404 Source not found',
                       [('Content-Type', 'text/plain; charset=utf-8')])
        return (e.reason, )
    except utils.FeedNotFound as e:
        start_response('404 Feed not found',
                       [('Content-Type', 'text/plain; charset=utf-8')])
        return (e.reason, )
    except utils.NotFoundError as e:
        start_response('404 Unknown file format',
                       [('Content-Type', 'text/plain; charset=utf-8')])
        return (e.reason, )
    except Exception:
        traceback.print_exception(*sys.exc_info())
        start_response('500 Internal Server Error', [])
        return ('', )
Example #28
def show_people():
    """
    Show people is meant to serve as an auto-complete route
    :return:
    """
    conn = get_conn(parse('config.yml', ingest))
    logger = get_logger()

    try:

        search = request.args.get('q', '')

        if search == '':
            return dumps(findAll(conn))
        else:
            return dumps(findOneOrNone(conn, search))

    except Exception as error:
        logger.debug(error)
        logger.debug(traceback.format_exc())
Example #29
    def runTest(self):
        if not os.getenv("ZOOKEEPER_HOME"):
            self.fail(
                "ZOOKEEPER_HOME environment variable is not set please set the location of ZOOKEEPER home in this environment variable"
            )
        self.waitForStop(self.ingester, self.waitTime())
        cfg = os.path.join(ACCUMULO_HOME, 'conf', SITE)
        import config
        dir = config.parse(cfg)['instance.dfs.dir']

        handle = self.runOn(self.masterHost(),
                            ["hadoop", "dfs", "-ls", dir + "/instance_id"])
        out, err = handle.communicate()
        out = out.strip()
        instance_id = out.split("\n")[-1].split("/")[-1]
        zkcli = os.path.join(os.getenv("ZOOKEEPER_HOME"), "bin", "zkCli.sh")
        if not os.path.exists(zkcli):
            zkcli = "/usr/share/zookeeper/bin/zkCli.sh"
        myaddr = socket.getaddrinfo(socket.gethostname(), None)[0][-1][0]
        self.wait(
            self.runOn(self.masterHost(), [
                zkcli, "-server", "localhost", "create", "-s",
                "/accumulo/%s/tservers/%s:%s/zlock-0" %
                (instance_id, myaddr, 42000), "tserver", "world:anyone:cdrw"
            ]))
        self.stop_tserver(self.masterHost(), signal=signal.SIGKILL)
        log.info("Making sure the tablet server is still considered online")
        handle = self.runClassOn(
            self.masterHost(),
            'org.apache.accumulo.server.test.GetMasterStats', [])
        out, err = handle.communicate()
        tServer = "%s:%s" % (myaddr, 39000 + FUZZ)
        assert out.find(tServer) > 0
        self.sleep(12)
        log.info("Making sure the tablet server is now offline")
        handle = self.runClassOn(
            self.masterHost(),
            'org.apache.accumulo.server.test.GetMasterStats', [])
        out, err = handle.communicate()
        assert (out.find(tServer) < 0) or (out.find(tServer) >
                                           out.find('Bad servers'))
Example #30
    def __init__(self):

        help(config._config)

        arguments = self.parse_arguments()

        if arguments.print_config:
            print(config.print(config.default_config()))
            return

        if not arguments.config_file:
            print("Must specify configuration file via -c. If no configuration"
                  + " file exists, you can generate a blank one with the -p"
                  + " flag")
            return

        try:
            self.config = config.parse(arguments.config_file)
        except config.BadConfiguration:
            print("Your configuration file is invalid. To generate a new,"
                  + " blank configuration, use the -p flag.")
Example #31
File: swup.py Project: thwarted/swup
def setup(argdict):
    """Analyze argument dictionary, return (mode, args, config)
    where mode is a string, args is a list of strings and config is
    a dictionary of configuration variables with variable name as key."""

    # did user request help or version info?
    if argdict.has_key("--help"):
        return ("help", None, None)
    if argdict.has_key("--version"):
        return ("version", None, None)

    # get config
    if argdict.has_key("--config-file"):
        config_file = argdict["--config-file"][0]
    else:
        config_file = config.DEFAULT_CONFIG_FILE

    configuration = None
    try:
        configuration = config.parse(config_file)
    except config.ConfigParseError, message:
        raise SetupException, "%s\n%s" % ("Error in config file %s." % config_file, "Parser said %s." % message)
Example #32
File: swup.py Project: thwarted/swup
def setup(argdict):
    '''Analyze argument dictionary, return (mode, args, config)
    where mode is a string, args is a list of strings and config is
    a dictionary of configuration variables with variable name as key.'''

    # did user request help or version info?
    if argdict.has_key('--help'):
        return ('help', None, None)
    if argdict.has_key('--version'):
        return ('version', None, None)

    # get config
    if argdict.has_key('--config-file'):
        config_file = argdict['--config-file'][0]
    else:
        config_file = config.DEFAULT_CONFIG_FILE

    configuration = None
    try:
        configuration = config.parse(config_file)
    except config.ConfigParseError, message:
        raise SetupException, '%s\n%s' % \
              ('Error in config file %s.' % config_file,
               'Parser said %s.' % message)
Example #33
File: skit.py Project: Tubebaum/pltcatan
def compile(file, clean=False, as_name=None):
    '''
    Cleans tmp/ directory and reinitializes with compiled skit code
    '''
    full_file = os.path.dirname(file) + '/'
    base_file = os.path.basename(file)
    compile_file = 'tmp/' + base_file
    if clean:
        shutil.rmtree('tmp/', True)
        compile('default.skit')
    file = open(file, 'r').read()
    file = imports(full_file, file)
    skit, succeeded = config.parse(file)
    main_property = os.path.splitext(base_file)[0]
    extend(skit)
    if as_name:
        properties[as_name] = skit
        main_property = as_name
    else:
        properties[main_property] = skit.get(main_property)
    if not os.path.isdir('tmp/'):
        os.makedirs('tmp/')
    pickle.dump(skit, open(compile_file, 'wb'))
    return skit, succeeded
Example #34
LOG_GENERIC = os.path.join(ACCUMULO_HOME, 'conf', 'generic_logger.xml')
LOG_MONITOR = os.path.join(ACCUMULO_HOME, 'conf', 'monitor_logger.xml')
General_CLASSPATH = ("$ACCUMULO_HOME/lib/[^.].$ACCUMULO_VERSION.jar, $ACCUMULO_HOME/lib/[^.].*.jar, $ZOOKEEPER_HOME/zookeeper[^.].*.jar,"
                     "$HADOOP_HOME/conf,$HADOOP_HOME/[^.].*.jar, $HADOOP_HOME/lib/[^.].*.jar")

log = logging.getLogger('test.auto')

ROOT = 'root'
ROOT_PASSWORD = '******'
INSTANCE_NAME = ID
ZOOKEEPERS = socket.getfqdn()

accumulo_site = os.path.join(ACCUMULO_HOME, 'conf', 'accumulo-site.xml')
if os.path.exists(accumulo_site):
   import config
   ZOOKEEPERS = config.parse(accumulo_site).get('instance.zookeeper.host', ZOOKEEPERS)

class Popen(BasePopen):
   def __init__(self, cmd, **args):
      self.cmd = cmd
      BasePopen.__init__(self, cmd, **args)

def quote(cmd):
   result = []
   for part in cmd:
      if '"' in part:
         result.append("'%s'" % part)
      else:
         result.append('"%s"' % part)
   return result
Example #35
    def execute(self, **kwargs):
        self.conf = config.parse()
        self.prepare_environment()

        self.build_infra()
Example #36
File: main.py Project: mor1/py-perscon
def usage():
    print "Usage: %s [-c <config>]" % sys.argv[0]
    sys.exit(2)

def main():
    try: opts, args = getopt.getopt(
        sys.argv[1:], "hsc:", ["help", "secure", "config="])
    except getopt.GetoptError, err:
        print str(err)
        usage()
    
    configfile = "perscon.conf"
    https = False
    for o, a in opts:
        if o in ("-c", "--config"): configfile = a
        elif o in ("-s", "--secure"): https = True
        elif o in ("-h", "--help"): usage()
        else: usage()

    config.parse(configfile)
    db.open()
    port = config.port()
    
    print "Listening on port %d" % port
    if https: server = SecureHTTPServer(('', port), PersconHandler)
    else: server = HTTPServer(('', port), PersconHandler)
    
    server.serve_forever()

if __name__ == '__main__': main()
Example #37
def main():
    #parse arguments
    config.parse()
    args = config.args
    for k, v in vars(args).items():
        logger.info(f"{k}:{v}")
    #set seeds
    torch.manual_seed(args.random_seed)
    torch.cuda.manual_seed_all(args.random_seed)
    np.random.seed(args.random_seed)
    random.seed(args.random_seed)

    #arguments check
    device, n_gpu = args_check(args)
    os.makedirs(args.output_dir, exist_ok=True)
    forward_batch_size = int(args.train_batch_size /
                             args.gradient_accumulation_steps)
    args.forward_batch_size = forward_batch_size

    #load bert config
    bert_config_S = BertConfig.from_json_file(args.bert_config_file_S)
    assert args.max_seq_length <= bert_config_S.max_position_embeddings

    #Prepare GLUE task
    processor = processors[args.task_name]()
    args.output_mode = output_modes[args.task_name]
    # e.g. MNLI: ['contradiction', 'entailment', 'neutral']
    label_list = processor.get_labels()
    num_labels = len(label_list)

    #read data
    train_dataset = None
    eval_datasets = None
    num_train_steps = None
    tokenizer = BertTokenizer(vocab_file=args.vocab_file,
                              do_lower_case=args.do_lower_case)
    # load the datasets and compute the number of training steps
    if args.do_train:
        train_dataset = load_and_cache_examples(args,
                                                args.task_name,
                                                tokenizer,
                                                evaluate=False)
        if args.aux_task_name:
            aux_train_dataset = load_and_cache_examples(args,
                                                        args.aux_task_name,
                                                        tokenizer,
                                                        evaluate=False,
                                                        is_aux=True)
            train_dataset = torch.utils.data.ConcatDataset(
                [train_dataset, aux_train_dataset])
        num_train_steps = int(
            len(train_dataset) / args.train_batch_size) * args.num_train_epochs
    if args.do_predict:
        eval_datasets = []
        eval_task_names = ("mnli",
                           "mnli-mm") if args.task_name == "mnli" else (
                               args.task_name, )
        for eval_task in eval_task_names:
            eval_datasets.append(
                load_and_cache_examples(args,
                                        eval_task,
                                        tokenizer,
                                        evaluate=True))
    logger.info("数据集已加载")

    # load and initialize the model; only the student model is used, so this effectively trains a single teacher model on the MNLI data
    model_S = BertForGLUESimple(bert_config_S,
                                num_labels=num_labels,
                                args=args)
    # initialize the student model
    if args.load_model_type == 'bert':
        assert args.init_checkpoint_S is not None
        state_dict_S = torch.load(args.init_checkpoint_S, map_location='cpu')
        if args.only_load_embedding:
            state_weight = {
                k[5:]: v
                for k, v in state_dict_S.items()
                if k.startswith('bert.embeddings')
            }
            missing_keys, _ = model_S.bert.load_state_dict(state_weight,
                                                           strict=False)
            logger.info(f"Missing keys {list(missing_keys)}")
        else:
            state_weight = {
                k[5:]: v
                for k, v in state_dict_S.items() if k.startswith('bert.')
            }
            missing_keys, _ = model_S.bert.load_state_dict(state_weight,
                                                           strict=False)
            assert len(missing_keys) == 0
        logger.info("Model loaded")
    elif args.load_model_type == 'all':
        assert args.tuned_checkpoint_S is not None
        state_dict_S = torch.load(args.tuned_checkpoint_S, map_location='cpu')
        model_S.load_state_dict(state_dict_S)
        logger.info("Model loaded")
    else:
        logger.info("Model is randomly initialized.")
    model_S.to(device)

    if args.local_rank != -1 or n_gpu > 1:
        if args.local_rank != -1:
            raise NotImplementedError
        elif n_gpu > 1:
            model_S = torch.nn.DataParallel(model_S)  #,output_device=n_gpu-1)

    if args.do_train:
        #parameters
        params = list(model_S.named_parameters())
        all_trainable_params = divide_parameters(params, lr=args.learning_rate)
        logger.info("Length of all_trainable_params: %d",
                    len(all_trainable_params))
        # optimizer setup
        optimizer = BERTAdam(all_trainable_params,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=num_train_steps,
                             schedule=args.schedule,
                             s_opt1=args.s_opt1,
                             s_opt2=args.s_opt2,
                             s_opt3=args.s_opt3)

        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_dataset))
        logger.info("  Forward batch size = %d", forward_batch_size)
        logger.info("  Num backward steps = %d", num_train_steps)

        ########### DISTILLATION ###########
        train_config = TrainingConfig(
            gradient_accumulation_steps=args.gradient_accumulation_steps,
            ckpt_frequency=args.ckpt_frequency,
            log_dir=args.output_dir,
            output_dir=args.output_dir,
            device=args.device)

        # run supervised training rather than distillation; this can be used to train the teacher model
        distiller = BasicTrainer(train_config=train_config,
                                 model=model_S,
                                 adaptor=BertForGLUESimpleAdaptorTraining)

        if args.local_rank == -1:
            train_sampler = RandomSampler(train_dataset)
        else:
            raise NotImplementedError
        train_dataloader = DataLoader(train_dataset,
                                      sampler=train_sampler,
                                      batch_size=args.forward_batch_size,
                                      drop_last=True)
        callback_func = partial(predict,
                                eval_datasets=eval_datasets,
                                args=args)
        with distiller:
            distiller.train(optimizer,
                            scheduler=None,
                            dataloader=train_dataloader,
                            num_epochs=args.num_train_epochs,
                            callback=callback_func)

    if not args.do_train and args.do_predict:
        res = predict(model_S, eval_datasets, step=0, args=args)
        print(res)
Example #38
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import optimizer
import os

import numpy as np
import time
import datetime
import path
import shutil

import config

args = config.parse()

# gpu, seed
torch.manual_seed(args.seed)
np.random.seed(args.seed)

os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
os.environ['PYTHONHASHSEED'] = str(args.seed)

use_norm = 'use-norm' if args.use_norm else 'no-norm'
add_self_loop = 'add-self-loop' if args.add_self_loop else 'no-self-loop'

#### configure output directory

dataname = f'{args.data}_{args.dataset}'
model_name = args.model_name
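Example #38 expects `config.parse()` to return an argparse-style namespace with at least `seed`, `gpu`, `use_norm`, `add_self_loop`, `data`, `dataset` and `model_name`; a minimal sketch of that interface (the flag names and defaults here are assumptions):

# Hypothetical sketch of the config.parse() that example #38 imports.
import argparse

def parse():
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--use-norm', dest='use_norm', action='store_true')
    parser.add_argument('--add-self-loop', dest='add_self_loop', action='store_true')
    parser.add_argument('--data', default='data')
    parser.add_argument('--dataset', default='dataset')
    parser.add_argument('--model-name', dest='model_name', default='model')
    return parser.parse_args()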
Example #39
def main():
    #parse arguments
    config.parse()
    args = config.args
    for k, v in vars(args).items():
        logger.info(f"{k}:{v}")
    #set seeds
    torch.manual_seed(args.random_seed)
    torch.cuda.manual_seed_all(args.random_seed)
    np.random.seed(args.random_seed)
    random.seed(args.random_seed)

    #arguments check
    device, n_gpu = args_check(args)
    os.makedirs(args.output_dir, exist_ok=True)
    forward_batch_size = int(args.train_batch_size /
                             args.gradient_accumulation_steps)
    args.forward_batch_size = forward_batch_size

    #load bert config
    bert_config_S = BertConfig.from_json_file(args.bert_config_file_S)
    assert args.max_seq_length <= bert_config_S.max_position_embeddings

    #read data
    train_examples = None
    train_features = None
    eval_examples = None
    eval_features = None
    num_train_steps = None

    tokenizer = ChineseFullTokenizer(vocab_file=args.vocab_file,
                                     do_lower_case=args.do_lower_case)
    convert_fn = partial(convert_examples_to_features,
                         tokenizer=tokenizer,
                         max_seq_length=args.max_seq_length,
                         doc_stride=args.doc_stride,
                         max_query_length=args.max_query_length)
    if args.do_train:
        train_examples, train_features = read_and_convert(
            args.train_file,
            is_training=True,
            do_lower_case=args.do_lower_case,
            read_fn=read_squad_examples,
            convert_fn=convert_fn)
        if args.fake_file_1:
            fake_examples1, fake_features1 = read_and_convert(
                args.fake_file_1,
                is_training=True,
                do_lower_case=args.do_lower_case,
                read_fn=read_squad_examples,
                convert_fn=convert_fn)
            train_examples += fake_examples1
            train_features += fake_features1
        if args.fake_file_2:
            fake_examples2, fake_features2 = read_and_convert(
                args.fake_file_2,
                is_training=True,
                do_lower_case=args.do_lower_case,
                read_fn=read_squad_examples,
                convert_fn=convert_fn)
            train_examples += fake_examples2
            train_features += fake_features2

        num_train_steps = int(len(train_features) /
                              args.train_batch_size) * args.num_train_epochs

    if args.do_predict:
        eval_examples, eval_features = read_and_convert(
            args.predict_file,
            is_training=False,
            do_lower_case=args.do_lower_case,
            read_fn=read_squad_examples,
            convert_fn=convert_fn)

    #Build Model and load checkpoint
    model_S = BertForQASimple(bert_config_S, args)
    #Load student
    if args.load_model_type == 'bert':
        assert args.init_checkpoint_S is not None
        state_dict_S = torch.load(args.init_checkpoint_S, map_location='cpu')
        state_weight = {
            k[5:]: v
            for k, v in state_dict_S.items() if k.startswith('bert.')
        }
        missing_keys, _ = model_S.bert.load_state_dict(state_weight,
                                                       strict=False)
        assert len(missing_keys) == 0
    elif args.load_model_type == 'all':
        assert args.tuned_checkpoint_S is not None
        state_dict_S = torch.load(args.tuned_checkpoint_S, map_location='cpu')
        model_S.load_state_dict(state_dict_S)
    else:
        logger.info("Model is randomly initialized.")
    model_S.to(device)

    if args.local_rank != -1 or n_gpu > 1:
        if args.local_rank != -1:
            raise NotImplementedError
        elif n_gpu > 1:
            model_S = torch.nn.DataParallel(model_S)  #,output_device=n_gpu-1)

    if args.do_train:
        #parameters
        params = list(model_S.named_parameters())
        all_trainable_params = divide_parameters(params, lr=args.learning_rate)
        logger.info("Length of all_trainable_params: %d",
                    len(all_trainable_params))

        optimizer = BERTAdam(all_trainable_params,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=num_train_steps,
                             schedule=args.schedule,
                             s_opt1=args.s_opt1,
                             s_opt2=args.s_opt2,
                             s_opt3=args.s_opt3)

        logger.info("***** Running training *****")
        logger.info("  Num orig examples = %d", len(train_examples))
        logger.info("  Num split examples = %d", len(train_features))
        logger.info("  Forward batch size = %d", forward_batch_size)
        logger.info("  Num backward steps = %d", num_train_steps)

        ########### DISTILLATION ###########
        train_config = TrainingConfig(
            gradient_accumulation_steps=args.gradient_accumulation_steps,
            ckpt_frequency=args.ckpt_frequency,
            log_dir=args.output_dir,
            output_dir=args.output_dir,
            device=args.device)

        distiller = BasicTrainer(train_config=train_config,
                                 model=model_S,
                                 adaptor=BertForQASimpleAdaptorTraining)

        all_input_ids = torch.tensor([f.input_ids for f in train_features],
                                     dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features],
                                      dtype=torch.long)
        all_doc_mask = torch.tensor([f.doc_mask for f in train_features],
                                    dtype=torch.float)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features],
                                       dtype=torch.long)
        all_start_positions = torch.tensor(
            [f.start_position for f in train_features], dtype=torch.long)
        all_end_positions = torch.tensor(
            [f.end_position for f in train_features], dtype=torch.long)

        train_dataset = TensorDataset(all_input_ids, all_segment_ids,
                                      all_input_mask, all_doc_mask,
                                      all_start_positions, all_end_positions)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_dataset)
        else:
            raise NotImplementedError
        train_dataloader = DataLoader(train_dataset,
                                      sampler=train_sampler,
                                      batch_size=args.forward_batch_size,
                                      drop_last=True)
        callback_func = partial(predict,
                                eval_examples=eval_examples,
                                eval_features=eval_features,
                                args=args)
        with distiller:
            distiller.train(optimizer,
                            scheduler=None,
                            dataloader=train_dataloader,
                            num_epochs=args.num_train_epochs,
                            callback=callback_func)

    if not args.do_train and args.do_predict:
        res = predict(model_S, eval_examples, eval_features, step=0, args=args)
        print(res)
Example #40
def main():
    #parse arguments
    config.parse()
    args = config.args
    for k, v in vars(args).items():
        logger.info(f"{k}:{v}")
    #set seeds
    torch.manual_seed(args.random_seed)
    torch.cuda.manual_seed_all(args.random_seed)
    np.random.seed(args.random_seed)
    random.seed(args.random_seed)

    #arguments check
    device, n_gpu = args_check(args)
    os.makedirs(args.output_dir, exist_ok=True)
    forward_batch_size = int(args.train_batch_size /
                             args.gradient_accumulation_steps)
    args.forward_batch_size = forward_batch_size

    #load bert config
    bert_config_T = BertConfig.from_json_file(args.bert_config_file_T)
    bert_config_S = BertConfig.from_json_file(args.bert_config_file_S)
    assert args.max_seq_length <= bert_config_T.max_position_embeddings
    assert args.max_seq_length <= bert_config_S.max_position_embeddings

    #Prepare GLUE task
    processor = processors[args.task_name]()
    args.output_mode = output_modes[args.task_name]
    label_list = processor.get_labels()
    num_labels = len(label_list)

    #read data
    train_dataset = None
    eval_datasets = None
    num_train_steps = None
    tokenizer = BertTokenizer(vocab_file=args.vocab_file,
                              do_lower_case=args.do_lower_case)
    # load the datasets
    if args.do_train:
        train_dataset, examples = load_and_cache_examples(args,
                                                          args.task_name,
                                                          tokenizer,
                                                          evaluate=False)
        if args.aux_task_name:
            aux_train_dataset, examples = load_and_cache_examples(
                args,
                args.aux_task_name,
                tokenizer,
                evaluate=False,
                is_aux=True)
            train_dataset = torch.utils.data.ConcatDataset(
                [train_dataset, aux_train_dataset])
        num_train_steps = int(
            len(train_dataset) / args.train_batch_size) * args.num_train_epochs
    if args.do_predict:
        eval_datasets = []
        eval_task_names = ("mnli",
                           "mnli-mm") if args.task_name == "mnli" else (
                               args.task_name, )
        for eval_task in eval_task_names:
            eval_dataset, examples = load_and_cache_examples(args,
                                                             eval_task,
                                                             tokenizer,
                                                             evaluate=True)
            eval_datasets.append(eval_dataset)
    logger.info("数据集加载成功")

    # load the teacher and student models
    model_T = BertForGLUESimple(bert_config_T,
                                num_labels=num_labels,
                                args=args)
    model_S = BertForGLUESimple(bert_config_S,
                                num_labels=num_labels,
                                args=args)
    # load the teacher model parameters
    if args.tuned_checkpoint_T is not None:
        state_dict_T = torch.load(args.tuned_checkpoint_T, map_location='cpu')
        model_T.load_state_dict(state_dict_T)
        model_T.eval()
    else:
        assert args.do_predict is True
    #Load student
    if args.load_model_type == 'bert':
        assert args.init_checkpoint_S is not None
        state_dict_S = torch.load(args.init_checkpoint_S, map_location='cpu')
        if args.only_load_embedding:
            state_weight = {
                k[5:]: v
                for k, v in state_dict_S.items()
                if k.startswith('bert.embeddings')
            }
            missing_keys, _ = model_S.bert.load_state_dict(state_weight,
                                                           strict=False)
            logger.info(f"Missing keys {list(missing_keys)}")
        else:
            state_weight = {
                k[5:]: v
                for k, v in state_dict_S.items() if k.startswith('bert.')
            }
            missing_keys, _ = model_S.bert.load_state_dict(state_weight,
                                                           strict=False)
            assert len(missing_keys) == 0
        logger.info("Model loaded")
    elif args.load_model_type == 'all':
        assert args.tuned_checkpoint_S is not None
        state_dict_S = torch.load(args.tuned_checkpoint_S, map_location='cpu')
        model_S.load_state_dict(state_dict_S)
        logger.info("Model loaded")
    else:
        logger.info("Student模型没有可加载参数,随机初始化参数 randomly initialized.")
    model_T.to(device)
    model_S.to(device)

    if args.local_rank != -1 or n_gpu > 1:
        if args.local_rank != -1:
            raise NotImplementedError
        elif n_gpu > 1:
            model_T = torch.nn.DataParallel(model_T)  #,output_device=n_gpu-1)
            model_S = torch.nn.DataParallel(model_S)  #,output_device=n_gpu-1)

    if args.do_train:
        #parameters
        params = list(model_S.named_parameters())
        all_trainable_params = divide_parameters(params, lr=args.learning_rate)
        logger.info("Length of all_trainable_params: %d",
                    len(all_trainable_params))
        # optimizer setup
        optimizer = BERTAdam(all_trainable_params,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=num_train_steps,
                             schedule=args.schedule,
                             s_opt1=args.s_opt1,
                             s_opt2=args.s_opt2,
                             s_opt3=args.s_opt3)

        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_dataset))
        logger.info("  Forward batch size = %d", forward_batch_size)
        logger.info("  Num backward steps = %d", num_train_steps)

        ########### DISTILLATION ###########
        train_config = TrainingConfig(
            gradient_accumulation_steps=args.gradient_accumulation_steps,
            ckpt_frequency=args.ckpt_frequency,
            log_dir=args.output_dir,
            output_dir=args.output_dir,
            device=args.device)
        # fixed intermediate-layer match configurations defined in matches.py
        from matches import matches
        intermediate_matches = None
        if isinstance(args.matches, (list, tuple)):
            intermediate_matches = []
            for match in args.matches:
                intermediate_matches += matches[match]
        logger.info(f"中间层match信息: {intermediate_matches}")
        distill_config = DistillationConfig(
            temperature=args.temperature,
            intermediate_matches=intermediate_matches)

        logger.info(f"训练配置: {train_config}")
        logger.info(f"蒸馏配置: {distill_config}")
        adaptor_T = partial(BertForGLUESimpleAdaptor,
                            no_logits=args.no_logits,
                            no_mask=args.no_inputs_mask)
        adaptor_S = partial(BertForGLUESimpleAdaptor,
                            no_logits=args.no_logits,
                            no_mask=args.no_inputs_mask)
        # general distiller supporting intermediate-state matching
        distiller = GeneralDistiller(train_config=train_config,
                                     distill_config=distill_config,
                                     model_T=model_T,
                                     model_S=model_S,
                                     adaptor_T=adaptor_T,
                                     adaptor_S=adaptor_S)

        if args.local_rank == -1:
            train_sampler = RandomSampler(train_dataset)
        else:
            raise NotImplementedError
        train_dataloader = DataLoader(train_dataset,
                                      sampler=train_sampler,
                                      batch_size=args.forward_batch_size,
                                      drop_last=True)
        callback_func = partial(predict,
                                eval_datasets=eval_datasets,
                                args=args,
                                examples=examples)
        with distiller:
            distiller.train(optimizer,
                            scheduler=None,
                            dataloader=train_dataloader,
                            num_epochs=args.num_train_epochs,
                            callback=callback_func)

    if not args.do_train and args.do_predict:
        res = predict(model_S,
                      eval_datasets,
                      step=0,
                      args=args,
                      examples=examples,
                      label_list=label_list)
        print(res)
Example #41
        # Specifies where to find all the card images for each set.
        self.card_sets = [CardSet(name, find_cards(folder), enabled)
            for name, (folder, enabled) in kwargs['card_sets'].iteritems()]
     
        super(Application, self).__init__(*args, **kwargs)


settings = {
    'static_path' : os.path.join(os.path.dirname(__file__), 'static'),
    'template_path' : os.path.join(os.path.dirname(__file__), 'templates'),
    'debug' : True,
}

configFilename = (sys.argv + ['config.json'])[1]
settings.update(config.parse(configFilename))

application = Application([
    (r'/', MainHandler),
    (r'/main.js', MainJSHandler),
    (r'/main.css', MainCSSHandler),
    (r'/setusername', SetUsernameHandler),
    (r'/create', CreateHandler),
    (r'/getgames', GetGamesHandler),
    (r'/getusers', GetUsersHandler),
    (r'/game/([0-9]+)/(.+)', GameHandler),
    (r'/chat', ChatHandler),
], **settings)

if __name__ == "__main__":
Example #42
 def __init__(self, *args, **kwargs):
     self.cfg = config.parse(CONFIG)
     super(InstallerServerApp, self).__init__(*args, **kwargs)
Example #43
def main():
    usage = 'usage: %prog [options] [pagename ...]'
    version = '%%prog %s (%s)' % (__version__, __date__)

    optparser = OptionParser(usage=usage, version=version)
    optparser.add_option('-m', '--make', action='store_true', help='build modified pages')
    optparser.add_option('-b', '--build', action='store_true', help='build all pages')
    optparser.add_option('-c', '--clean', action='store_true', help='remove html files')
    optparser.add_option('-s', '--synchronize', action='store_true', help='upload modified files to the FTP '
        'server; wiki files are not uploaded; subdirectories *ARE* uploaded; requires ftputil library; FTP '
        'has to be configured using the config file; this switch can be combined with any of the above '
        'three')
    optparser.add_option('-f', '--force', action='store_true', help='when used together with --synchronize, '
        'causes the files and directories that do not exist locally to be deleted from the FTP server')
    optparser.add_option('-d', '--directory', dest='dir', help='wiki directory, defaults to current directory',
        default='.')
    optparser.add_option('-g', '--config', help='name of the config file relative to the wiki directory, '
        'defaults to _statwiki.config', default='_statwiki.config')

    options, args = optparser.parse_args()
    a = [name for name, value in options.__dict__.items() if name in ('make', 'build', 'clean') and value]
    if len(a) > 1:
        sys.exit('error: only one of --make, --build and --clean switches can be used at once')
    try:
        mode = a[0]
    except IndexError:
        if options.synchronize:
            # if only --synchronize was specified, do nothing besides syncing
            mode = 'idle'
        else:
            sys.exit('error: one of the --make, --build, --clean or --synchronize switches must '
                'be specified; use --help for more information')

    os.chdir(options.dir)
    config.parse(wikiutil.fixFileNameCase(options.config))

    if args:
        # Add .wiki to the names if needed.
        wikipages = []
        for name in args:
            if not name.endswith('.wiki'):
                name = wikiutil.pageName2inputFile(name)
            wikipages.append(wikiutil.fixFileNameCase(name))
    else:
        wikipages = [x for x in os.listdir('.') if x.endswith('.wiki') and not x.startswith('_')]

    if mode == 'clean':
        print 'Cleaning...'
        for filename in wikipages:
            pagename = wikiutil.inputFile2pageName(filename)
            try:
                os.unlink(wikiutil.pageName2outputFile(pagename))
                print wikiutil.pageName2outputFile(pagename)
            except OSError:
                pass

    elif mode == 'make':
        print 'Making...'
        todo = []
        for filename in wikipages:
            ofilename = wikiutil.inputFile2outputFile(filename)
            if not os.path.exists(ofilename) or \
                    os.path.getmtime(ofilename) < os.path.getmtime(filename):
                todo.append(filename)
        process(todo)

    elif mode == 'build':
        print 'Building...'
        process(wikipages)
    
    if options.synchronize:
        print 'Synchronizing with %s...' % getattr(config.ftp, 'host', '???')
        try:
            host = config.ftp.host
        except AttributeError:
            sys.exit('cannot synchronize, configure the FTP server access first')
        # Import ftpsync only if --synchronize was specified so that ftputil doesn't have to be
        # installed if this option is not used.
        from ftpsync import synchronize
        synchronize(options.force)
    elif options.force:
        sys.exit('error: --force can only be used together with --synchronize')
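# Typical invocations, assuming the script is named statwiki.py:
#   python statwiki.py --make                  # rebuild only modified pages
#   python statwiki.py --build --synchronize   # rebuild everything, then upload
#   python statwiki.py --synchronize --force   # upload and delete remote orphans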
Example #44
0
File: slurp.py Project: cilsat/slurp
    pp = pd.concat((p, sp))
    pp.sort_values(['x','y'], inplace=True)
    # log(pp.head().to_string())
    # log('\n')
    # log(pp.tail().to_string())
    log('Pre-processing... ')
    pp.drop(pp[pp.r < config.config['min_height']].index, inplace=True)
    pp.drop(pp[pp.r > config.config['max_height']].index, inplace=True)
    rh_min = 1.6*config.config['cellsize']
    pp.loc[pp.rh < rh_min, 'rh'] = rh_min
    adj = get_groupies(pp, grad=config.config['gradient'])
    log('Done\n')
    return pp, adj

if __name__ == "__main__":
    from interpolator import Interpolator
    from writer import Writer
    import config

    config.parse()
    if len(sys.argv) > 1:
        df, adj = prep(fbore=sys.argv[1], fscreen=sys.argv[2], config=config)
        w = Writer(sys.argv[3])
        i = Interpolator(df, adj, w)
        i.interpolate()
    else:
        df, adj = prep(config=config)
        i = Interpolator(df, adj)
        i.interpolate()

Example #45
0
File: sync.py Project: mor1/py-perscon
def main():
    global Verbose
    
    ## mort: this config stuff is a bit grim - really need a proper
    ## plugin interface
    configfile = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                              "..", "..", "perscon", "perscon.conf")
    config.parse(configfile)
    uri = "http://localhost:%d/" % (config.port(),)
    Perscon_utils.init_url(uri)

    service = "twitter.com"
    username, password = Perscon_utils.get_credentials(service)
    ## mort: also note that by using Basic authentication the
    ## username/password pair are essentially being passed in the clear
    t = twitter.Twitter(username, password)

    ## 1. tweets mentioning us
    tsearch = twitter.Twitter(username, password, domain="search.twitter.com")
    pg = 1
    while True:
        rs = retryOnError("search pg=%d" % pg,
                          lambda: tsearch.search(rpp=90, page=pg, q=username))
        if len(rs['results']) == 0: break
        stash_tweets(service, username, rs['results'])
        pg += 1
  
    ## 2. our own tweets
    pg = 1
    while True:
        rs = retryOnError("own_tweets %d" % (pg,),
                          lambda: t.statuses.user_timeline(page=pg, count=200))
        if len(rs) == 0: break
        stash_tweets(service, username, rs)
        pg += 1

    ## 3. our own retweets (stupid api - not included in above)
    pg = 1
    Verbose = True
    while True:
        rs = retryOnError("own_retweets %d" % (pg,),
                          lambda: t.statuses.retweeted_by_me(page=pg, count=200))
        if len(rs) == 0: break
        stash_tweets(service, username, rs)
        pg += 1
        
    ## 4. direct messages we sent 
    pg = 1
    while True:
        rs = retryOnError("direct_messages_sent %d" % (pg,),
                          lambda: t.direct_messages.sent(page=pg, count=200))
        if len(rs) == 0: break
        stash_tweets(service, username, rs)
        pg += 1
        
    ## 5. direct messages we received
    pg = 1
    while True:
        rs = retryOnError("direct_messages_received %d" % (pg,),
                          lambda: t.direct_messages(page=pg, count=200))
        if len(rs) == 0: break
        stash_tweets(service, username, rs)
        pg += 1

    ## 6. tweets from friends
    cr = -1
    friends = []
    while cr != 0:
        rs = retryOnError("get_friends cursor=%d" % cr,
                          lambda: t.friends.ids(cursor=cr))
        friends.extend(rs['ids'])
        cr = rs['next_cursor']

    print >> sys.stderr, "friends:", friends
    for friend in friends:
        pg = 1
        while True:
            rs = retryOnError(
                "friend_timeline %s %d" % (friend, pg),
                lambda: t.statuses.user_timeline(id=friend, page=pg, count=200))
            if len(rs) == 0: break
            stash_tweets(service, username, rs)
            pg += 1
        print >> sys.stderr, "friend: %s done" % friend
Example #46
0
File: mdms_import.py Project: mvdnes/mdms
def get_dbfs():
    configuration = config.parse('config.toml')
    db = database.get_instance(configuration)
    fs = filesystem.get_instance(configuration)
    return (db, fs)
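# config.parse('config.toml') suggests a thin TOML loader; a minimal sketch
# (assumed, not the project's actual implementation):
import tomllib  # Python 3.11+; earlier code typically used the `toml` package

def parse(path):
    with open(path, 'rb') as f:
        return tomllib.load(f)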
Example #47
0
File: server.py Project: arvoelke/Dixit
        # Specifies where to find all the card images for each set.
        self.card_sets = [CardSet(name, find_cards(folder), enabled)
            for name, (folder, enabled) in kwargs['card_sets'].iteritems()]
        self.admin_password = kwargs['admin_password']

        super(Application, self).__init__(*args, **kwargs)


settings = {
    'static_path' : os.path.join(os.path.dirname(__file__), 'static'),
    'template_path' : os.path.join(os.path.dirname(__file__), 'templates'),
    'debug' : True,
}

configFilename = (sys.argv + ['config.json'])[1]  # argv[1] if given, else 'config.json'
settings.update(config.parse(configFilename))

application = Application([
    (r'/', MainHandler),
    (r'/admin', AdminHandler),
    (r'/main.js', MainJSHandler),
    (r'/main.css', MainCSSHandler),
    (r'/setusername', SetUsernameHandler),
    (r'/create', CreateHandler),
    (r'/getgames', GetGamesHandler),
    (r'/getusers', GetUsersHandler),
    (r'/game/([0-9]+)/(.+)', GameHandler),
    (r'/chat', ChatHandler),
], **settings)

if __name__ == "__main__":
Example #48
0
import renderer
import facial_feature_detector as feature_detection
import camera_calibration as calib
import scipy.io as io
import cv2
import numpy as np
import os
import check_resources as check
import matplotlib.pyplot as plt
import sys
import myutil
import ThreeD_Model
import config

this_path = os.path.dirname(os.path.abspath(__file__))
opts = config.parse()
## 3D models we are going to use for the rendering {0, -40, -75}
newModels = opts.getboolean('renderer', 'newRenderedViews')
if newModels:
    pose_models_folder = '/models3d_new/'
    pose_models = [
        'model3D_aug_-00_00', 'model3D_aug_-22_00', 'model3D_aug_-40_00',
        'model3D_aug_-55_00', 'model3D_aug_-75_00'
    ]
else:
    pose_models_folder = '/models3d/'
    pose_models = [
        'model3D_aug_-00',
        'model3D_aug_-40',
        'model3D_aug_-75',
    ]
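# opts.getboolean(...) above implies config.parse() returns a ConfigParser-like
# object; a minimal sketch under that assumption (the .ini path is made up):
import configparser  # ConfigParser on Python 2

def parse(path='config.ini'):
    opts = configparser.ConfigParser()
    opts.read(path)
    return opts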
Example #49
0
import os
import sys
import inferer
from config import parse

os.environ['CUDA_VISIBLE_DEVICES'] = '1'
config_path = './param_config.yml'
config = parse(config_path)

infer = inferer.Inferer(config)  # renamed so the instance does not shadow the inferer module
infer()
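# A .yml path suggests parse() is a thin YAML loader; a minimal sketch under
# that assumption, using PyYAML:
import yaml

def parse(path):
    with open(path) as f:
        return yaml.safe_load(f)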


Example #50
0
def main():
    #parse arguments
    config.parse()
    args = config.args
    for k, v in vars(args).items():
        logger.info(f"{k}:{v}")
    #set seeds
    torch.manual_seed(args.random_seed)
    torch.cuda.manual_seed_all(args.random_seed)
    np.random.seed(args.random_seed)
    random.seed(args.random_seed)

    #arguments check
    device, n_gpu = args_check(args)
    os.makedirs(args.output_dir, exist_ok=True)
    forward_batch_size = int(args.train_batch_size /
                             args.gradient_accumulation_steps)
    args.forward_batch_size = forward_batch_size

    #load config
    teachers_and_student = parse_model_config(args.model_config_json)

    #Prepare GLUE task
    processor = processors[args.task_name]()
    args.output_mode = output_modes[args.task_name]
    label_list = processor.get_labels()
    num_labels = len(label_list)

    #read data
    train_dataset = None
    eval_datasets = None
    num_train_steps = None

    tokenizer_S = teachers_and_student['student']['tokenizer']
    prefix_S = teachers_and_student['student']['prefix']

    if args.do_train:
        train_dataset = load_and_cache_examples(args,
                                                args.task_name,
                                                tokenizer_S,
                                                prefix=prefix_S,
                                                evaluate=False)
    if args.do_predict:
        eval_datasets = []
        eval_task_names = ("mnli",
                           "mnli-mm") if args.task_name == "mnli" else (
                               args.task_name, )
        for eval_task in eval_task_names:
            eval_datasets.append(
                load_and_cache_examples(args,
                                        eval_task,
                                        tokenizer_S,
                                        prefix=prefix_S,
                                        evaluate=True))
    logger.info("Data loaded")

    #Build Model and load checkpoint
    if args.do_train:
        model_Ts = []
        for teacher in teachers_and_student['teachers']:
            model_type_T = teacher['model_type']
            model_config_T = teacher['config']
            checkpoint_T = teacher['checkpoint']

            _, _, model_class_T = MODEL_CLASSES[model_type_T]
            model_T = model_class_T(model_config_T, num_labels=num_labels)
            state_dict_T = torch.load(checkpoint_T, map_location='cpu')
            missing_keys, un_keys = model_T.load_state_dict(state_dict_T,
                                                            strict=True)
            logger.info(f"Teacher Model {model_type_T} loaded")
            model_T.to(device)
            model_T.eval()
            model_Ts.append(model_T)

    student = teachers_and_student['student']
    model_type_S = student['model_type']
    model_config_S = student['config']
    checkpoint_S = student['checkpoint']
    _, _, model_class_S = MODEL_CLASSES[model_type_S]
    model_S = model_class_S(model_config_S, num_labels=num_labels)
    if checkpoint_S is not None:
        state_dict_S = torch.load(checkpoint_S, map_location='cpu')
        missing_keys, un_keys = model_S.load_state_dict(state_dict_S,
                                                        strict=False)
        logger.info(f"missing keys:{missing_keys}")
        logger.info(f"unexpected keys:{un_keys}")
    else:
        logger.warning("Initializing student randomly")
    logger.info("Student Model loaded")
    model_S.to(device)

    if args.local_rank != -1 or n_gpu > 1:
        if args.local_rank != -1:
            raise NotImplementedError
        elif n_gpu > 1:
            if args.do_train:
                model_Ts = [
                    torch.nn.DataParallel(model_T) for model_T in model_Ts
                ]
            model_S = torch.nn.DataParallel(model_S)  #,output_device=n_gpu-1)

    if args.do_train:
        #parameters
        params = list(model_S.named_parameters())
        all_trainable_params = divide_parameters(params, lr=args.learning_rate)
        logger.info("Length of all_trainable_params: %d",
                    len(all_trainable_params))

        if args.local_rank == -1:
            train_sampler = RandomSampler(train_dataset)
        else:
            raise NotImplementedError
        train_dataloader = DataLoader(train_dataset,
                                      sampler=train_sampler,
                                      batch_size=args.forward_batch_size,
                                      drop_last=True)
        num_train_steps = int(
            len(train_dataloader) // args.gradient_accumulation_steps *
            args.num_train_epochs)

        ########## DISTILLATION ###########
        train_config = TrainingConfig(
            gradient_accumulation_steps=args.gradient_accumulation_steps,
            ckpt_frequency=args.ckpt_frequency,
            log_dir=args.output_dir,
            output_dir=args.output_dir,
            fp16=args.fp16,
            device=args.device)

        distill_config = DistillationConfig(temperature=args.temperature,
                                            kd_loss_type='ce')

        logger.info(f"{train_config}")
        logger.info(f"{distill_config}")
        adaptor_T = BertForGLUESimpleAdaptor
        adaptor_S = BertForGLUESimpleAdaptor

        distiller = MultiTeacherDistiller(train_config=train_config,
                                          distill_config=distill_config,
                                          model_T=model_Ts,
                                          model_S=model_S,
                                          adaptor_T=adaptor_T,
                                          adaptor_S=adaptor_S)

        optimizer = AdamW(all_trainable_params, lr=args.learning_rate)
        scheduler_class = get_linear_schedule_with_warmup
        scheduler_args = {
            'num_warmup_steps': int(args.warmup_proportion * num_train_steps),
            'num_training_steps': num_train_steps
        }

        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_dataset))
        logger.info("  Forward batch size = %d", forward_batch_size)
        logger.info("  Num backward steps = %d", num_train_steps)

        callback_func = partial(predict,
                                eval_datasets=eval_datasets,
                                args=args)
        with distiller:
            distiller.train(optimizer,
                            scheduler_class=scheduler_class,
                            scheduler_args=scheduler_args,
                            dataloader=train_dataloader,
                            num_epochs=args.num_train_epochs,
                            callback=callback_func,
                            max_grad_norm=1)

    if not args.do_train and args.do_predict:
        res = predict(model_S, eval_datasets, step=0, args=args)
        print(res)
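# Note: unlike the scheduler=None variant in the earlier example, this passes
# scheduler_class and scheduler_args; the distiller presumably constructs the
# scheduler itself, roughly equivalent to:
#
#     scheduler = scheduler_class(optimizer, **scheduler_args)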
Example #51
0
def main():
    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument('-c', dest='config')
    args = parser.parse_args()
    config.parse(args.config)
    train()
Example #52
0
File: erai2icar.py Project: wcurrier/icar
import sys
import os
import traceback

import config
import io_routines
import output
import convert


def main(info):

    for i in range(info.ntimes):
        raw_data = io_routines.load_data(info.times[i], info)
        processed_data = convert.era2icar(raw_data)
        output.write_file(info.times[i], info, processed_data)


if __name__ == '__main__':
    try:
        info = config.parse()
        config.update_info(info)

        exit_code = main(info)
        if exit_code is None:
            exit_code = 0
        sys.exit(exit_code)
    except KeyboardInterrupt as e:  # Ctrl-C
        raise e
    except SystemExit as e:  # sys.exit()
        raise e
    except Exception as e:
        print('ERROR, UNEXPECTED EXCEPTION')
        print(str(e))
        traceback.print_exc()
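        # os._exit bypasses normal interpreter cleanup and terminates
        # immediately with a nonzero status.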
        os._exit(1)
Example #53
0
#!/usr/bin/env python

import sys
sys.path.append('scripts')
sys.path.append('scripts/core_common')
sys.path.append('scripts/core_common/modules')
import config
import base
import build
import build_js
import build_server
import deploy
import make_common

# parse configuration
config.parse()

base_dir = base.get_script_dir(__file__)
base.set_env("BUILD_PLATFORM", config.option("platform"))

# branding
if ("1" != base.get_env("OO_RUNNING_BRANDING")) and (
        "" != config.option("branding")):
    branding_dir = base_dir + "/../" + config.option("branding")

    if ("1" == config.option("update")):
        is_exist = True
        if not base.is_dir(branding_dir):
            is_exist = False
            base.cmd(
                "git",
Example #54
0
#!python3
import sys

from config import parse
from utils import Request, NotFoundError


class SimulatedRequest(Request):
    def __init__(self):
        self.host = 'http://example.org'


try:
    print(parse(SimulatedRequest(), *sys.argv[1:]))
except NotFoundError as e:
    print(e)
    sys.exit(2)
Example #55
0
import sys
import os
import traceback

import config
import io_routines
import output
import convert

def main(info):
    
    for i in range(info.ntimes):
        raw_data=io_routines.load_data(info.times[i],info)
        processed_data=convert.era2icar(raw_data)
        output.write_file(info.times[i],info,processed_data)


if __name__ == '__main__':
    try:
        info=config.parse()
        config.update_info(info)
        
        exit_code = main(info)
        if exit_code is None:
            exit_code = 0
        sys.exit(exit_code)
    except KeyboardInterrupt, e: # Ctrl-C
        raise e
    except SystemExit, e: # sys.exit()
        raise e
    except Exception, e:
        print('ERROR, UNEXPECTED EXCEPTION')
        print(str(e))
        traceback.print_exc()
        os._exit(1)
Example #56
0
from flask import Flask, make_response, request
from config import parse
from glue import expose_as_api
from leagues import Leagues
import os
from pymongo import MongoClient
from json import dumps

config = parse('config.yml')
mongo = MongoClient(os.environ['DB_PORT_27017_TCP_ADDR'],
                    27017).dota2league_tracker
app = Flask(__name__)


#TODO: add more depth here
@app.route('/health')
def get_health():
    return "Ok"


@app.route('/config')
def get_config():
    return dumps(config)


leagues = Leagues(mongo)
expose_as_api(app, leagues, '/leagues')

from matches import process_leagues_in_background, process_matches_in_background, Matches

matches = Matches(mongo)
Example #57
0
SITE = "test-" + ID

WALOG = os.path.join(ACCUMULO_HOME, 'walogs', ID)
General_CLASSPATH = "$ACCUMULO_HOME/lib/[^.].$ACCUMULO_VERSION.jar, $ACCUMULO_HOME/lib/[^.].*.jar, $ZOOKEEPER_HOME/zookeeper[^.].*.jar, $HADOOP_HOME/conf,$HADOOP_HOME/[^.].*.jar, $HADOOP_HOME/lib/[^.].*.jar"

log = logging.getLogger('test.auto')

ROOT = 'root'
ROOT_PASSWORD = '******'
INSTANCE_NAME = ID
ZOOKEEPERS = socket.getfqdn()

accumulo_site = os.path.join(ACCUMULO_HOME, 'conf', 'accumulo-site.xml')
if os.path.exists(accumulo_site):
    import config
    ZOOKEEPERS = config.parse(accumulo_site).get('instance.zookeeper.host',
                                                 ZOOKEEPERS)
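# accumulo-site.xml is a Hadoop-style XML property file; config.parse
# presumably flattens its <property><name>/<value> pairs into a dict.
# A minimal sketch under that assumption:
import xml.etree.ElementTree as ET

def parse(path):
    tree = ET.parse(path)
    return dict((prop.findtext('name'), prop.findtext('value'))
                for prop in tree.getroot().iter('property'))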


class Popen(BasePopen):
    def __init__(self, cmd, **args):
        self.cmd = cmd
        BasePopen.__init__(self, cmd, **args)


def quote(cmd):
    result = []
    for part in cmd:
        if '"' in part:
            result.append("'%s'" % part)
        else:
            result.append('"%s"' % part)
Example #58
0
import os

import config
import update

# Reading common options from env:
MODE_DIR = os.getenv("MODE_DIR")
COMPILER = os.getenv("COMPILER")
LOGGER = os.getenv("LOGGER")

# Reading user's code options from env
USER = os.getenv("USER")
SUBID = os.getenv("SUBID")
PROBLEM = os.getenv("PROBLEM")
LANGUAGE = os.getenv('LANGUAGE')

# Reading problem specific options from problem config
CONFIG = config.parse()

# Initializing log
prefix = SUBID + ':' + USER + ':' + PROBLEM + ':' + LANGUAGE + ': '
logmsg = ''

# Start compiling
update.status('COMPILE', SUBID, -1)
compret = os.system(COMPILER + ' ' + LANGUAGE)
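# On Unix, os.system returns a 16-bit wait status with the exit code in the
# high byte, so the integer division below is equivalent to os.WEXITSTATUS.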
compret //= 256

update.status('COMPILE', SUBID, compret)

if compret == 124:		# Compile time limit exceeded, refer to Gnu timeout manual
	logmsg = 'Compile time limit exceeded'
elif compret:			# Unspecified Compilation error
Example #59
0
from optparse import OptionParser
import logging
import sys

from dbus.mainloop.glib import DBusGMainLoop

import config
import notify

DBusGMainLoop(set_as_default=True)

parser = OptionParser()
parser.add_option("-d", "--debug", action="store_true", default=False, help="enable debugging")
parser.add_option("-c", "--config", default=None, help="configuration file to read")
(options, args) = parser.parse_args()

if options.debug:
    logging.basicConfig(level=logging.DEBUG)
else:
    logging.basicConfig(level=logging.WARNING)

logger = logging.getLogger("shackleton")

try:
    contexts = config.parse(options.config)
except IOError, e:
    logger.warning(str(e))
    sys.exit(1)

current_contexts = set()

for c in contexts.itervalues():
    def changed(context):
        logger.debug("Context %s changed" % context)
        # TODO: wrap in try/except
        if context.evaluateRules():
            if context not in current_contexts:
                current_contexts.add(context)
                notify.enter(context)
                context.runEnteringActions()