def parse_response(text):
    # Special cases
    if any(excluded_case in text for excluded_case in ['(broken file)', 'versus (TI ', 'Ceeeb']):
        return None

    text = re.sub(r'…', '...', text)  # Replace ellipsis with three dots

    regexps_empty_sub = [r'<!--.*?-->',  # Remove comments
                         r'{{resp\|(r|u|\d+|d\|\d+|rem)}}',  # Remove response rarity
                         r'{{hero icon\|[a-z- \']+\|\d+px}}',  # Remove hero icon
                         r'{{item( icon)?\|[a-z0-9() \']+\|\d+px}}',  # Remove item icon
                         r'\[\[File:[a-z.,!\'() ]+\|\d+px\|link=[a-z,!\'() ]+]]',  # Remove Files
                         r'<small>\[\[#[a-z_\-\' ]+\|\'\'followup\'\']]</small>',
                         # Remove followup links in <small> tags
                         r'<small>\'\'[a-z0-9 /]+\'\'</small>',  # Remove text in <small> tags
                         r'<ref>.*?</ref>',  # Remove text in <ref> tags
                         r'<nowiki>.*?</nowiki>',  # Remove text in <nowiki> tags
                         ]
    for regex in regexps_empty_sub:
        text = re.sub(regex, '', text, flags=re.IGNORECASE)

    regexps_sub_text = [r'\[\[([a-zé().:\',\- ]+)]]',  # Replace links such as [[Shitty Wizard]]
                        r'\[\[[a-zé0-9().:\'/ ]+\|([a-zé().:\' ]+)]]',
                        # Replace links such as  [[Ancient (Building)|Ancients]] and [[:File:Axe|Axe]]
                        r'{{tooltip\|(.*?)\|.*?}}',  # Replace tooltips
                        r'{{note\|([a-z.!\'\-?, ]+)\|[a-z.!\'\-?,()/ ]+}}',  # Replace notes
                        ]
    for regex in regexps_sub_text:
        text = re.sub(regex, '\\1', text, flags=re.IGNORECASE)

    if any(escape in text for escape in ['[[', ']]', '{{', '}}', '|', 'sm2']):
        logger.warn('Response could not be processed : ' + text)
        return None

    return text.strip()
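A minimal usage sketch (the sample string below is made up) of how the substitutions above strip wiki markup from a response line:

# Hypothetical call; assumes `re`, `logger`, and parse_response are defined as above.
sample = "{{resp|u}} [[Shitty Wizard]] strikes again! <!-- editor note -->"
print(parse_response(sample))  # -> "Shitty Wizard strikes again!"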
Example #2
    def _save_video(self, fname, frames, fps=8.0):
        path = os.path.join(self._config.record_dir, fname)

        def f(t):
            frame_length = len(frames)
            new_fps = 1.0 / (1.0 / fps + 1.0 / frame_length)
            idx = min(int(t * new_fps), frame_length - 1)
            return frames[idx]

        video = mpy.VideoClip(f, duration=len(frames) / fps + 2)

        video.write_videofile(path, fps, verbose=False, logger=None)
        logger.warn("[*] Video saved: {}".format(path))
Example #3
File: main.py Project: sts-sadr/furniture
def run(config):
    """
    Runs Trainer.
    """
    rank = MPI.COMM_WORLD.Get_rank()
    config.rank = rank
    config.is_chef = rank == 0
    config.seed = config.seed + rank
    config.num_workers = MPI.COMM_WORLD.Get_size()

    if config.is_chef:
        logger.warn('Run a base worker.')
        make_log_files(config)
    else:
        logger.warn('Run worker %d and disable logger.', config.rank)
        import logging
        logger.setLevel(logging.CRITICAL)

    def shutdown(signal, frame):
        logger.warn('Received signal %s: exiting', signal)
        sys.exit(128+signal)

    signal.signal(signal.SIGHUP, shutdown)
    signal.signal(signal.SIGINT, shutdown)
    signal.signal(signal.SIGTERM, shutdown)

    # set global seed
    np.random.seed(config.seed)
    torch.manual_seed(config.seed)
    torch.cuda.manual_seed_all(config.seed)

    os.environ["DISPLAY"] = ":1"

    if config.gpu is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = "{}".format(config.gpu)
        assert torch.cuda.is_available()
        config.device = torch.device("cuda")
    else:
        config.device = torch.device("cpu")

    # build a trainer
    trainer = Trainer(config)
    if config.is_train:
        trainer.train()
        logger.info("Finish training")
    else:
        trainer.evaluate()
        logger.info("Finish evaluating")
Example #4
    def set_active_sequence(self, sequence_name: str):
        self._active_sequence['name'] = sequence_name

        self._sequence = []

        for ID, offsets in self.compile_sequence().items():

            if ID in self.active_workers_ids():
                url = f"{self._workers[ID]['baseURL']}/worker/sequence"
                rs = requests.put(url, json=offsets)
                logger.info(f"PUT {url} [{len(offsets)}] -> {rs.status_code}")
            else:
                logger.warn(
                    f"{ID} is not an active worker, assigning offsets to master"
                )
                self._sequence.extend(offsets)
Example #5
File: tournament.py Project: bkeyvani/fsnd
def swissPairings():
    """Returns a list of pairs of players for the next round of a match.

    Assuming that there are an even number of players registered, each player
    appears exactly once in the pairings.  Each player is paired with another
    player with an equal or nearly-equal win record, that is, a player adjacent
    to him or her in the standings.

    Returns:
      A list of tuples, each of which contains (id1, name1, id2, name2)
        id1: the first player's unique id
        name1: the first player's name
        id2: the second player's unique id
        name2: the second player's name
    """
    # ps is a list of (id, name, wins, matches) tuples
    ps = playerStandings()
    # a list of all previous matches
    prev_matches = getMatches()

    pairs = []
    while len(ps):
        # grab the first player from the player standings list
        p1 = ps.pop(0)
        p1_id = p1[0]
        p1_name = p1[1]
        p1_wins = p1[2]
        # get a list of all players with the same number of wins
        same_rank = [row for row in ps if row[2] == p1_wins]
        # reverse same_rank to reduce the chance of trying to pair already
        # matched players
        same_rank.reverse()
        # pair the next player from the same_rank list who has not been
        # matched before.
        for p2 in same_rank:
            p2_id = p2[0]
            p2_name = p2[1]
            if p1_id != p2_id and not(
                (p1_id, p2_id) in prev_matches or
                (p2_id, p1_id) in prev_matches):
                pairs.append((p1_id, p1_name, p2_id, p2_name))
                ps.remove(p2)
                break
            else:
                logger.warn('skipped: %s', (p1_id, p2_id))

    return pairs
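A small trace of the pairing logic (the standings and match history below are made up), with the database-backed helpers stubbed out:

# Hypothetical stubs replacing the database-backed helpers used above; defined in the
# same module so the lookup at call time inside swissPairings finds them.
def playerStandings():
    return [(1, "Ann", 1, 1), (2, "Bob", 1, 1), (3, "Cal", 0, 1), (4, "Dee", 0, 1)]

def getMatches():
    return [(1, 3), (2, 4)]

print(swissPairings())  # -> [(1, 'Ann', 2, 'Bob'), (3, 'Cal', 4, 'Dee')]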
Example #6
    def _save_ckpt(self, ckpt_num, update_iter, env_step):
        ckpt_path = os.path.join(self._config.log_dir,
                                 "ckpt_%08d.pt" % ckpt_num)
        state_dict = {
            "step": ckpt_num,
            "update_iter": update_iter,
            "env_step": env_step,
        }
        state_dict["agent"] = self._agent.state_dict()
        torch.save(state_dict, ckpt_path)
        logger.warn("Save checkpoint: %s", ckpt_path)

        replay_path = os.path.join(self._config.log_dir,
                                   "replay_%08d.pkl" % ckpt_num)
        with gzip.open(replay_path, "wb") as f:
            replay_buffers = {"replay": self._agent.replay_buffer()}
            pickle.dump(replay_buffers, f)
Example #7
def swissPairings():
    """Returns a list of pairs of players for the next round of a match.

    Assuming that there are an even number of players registered, each player
    appears exactly once in the pairings.  Each player is paired with another
    player with an equal or nearly-equal win record, that is, a player adjacent
    to him or her in the standings.

    Returns:
      A list of tuples, each of which contains (id1, name1, id2, name2)
        id1: the first player's unique id
        name1: the first player's name
        id2: the second player's unique id
        name2: the second player's name
    """
    # ps is a list of (id, name, wins, matches) tuples
    ps = playerStandings()
    # a list of all previous matches
    prev_matches = getMatches()

    pairs = []
    while len(ps):
        # grab the first player from the player standings list
        p1 = ps.pop(0)
        p1_id = p1[0]
        p1_name = p1[1]
        p1_wins = p1[2]
        # get a list of all players with the same number of wins
        same_rank = [row for row in ps if row[2] == p1_wins]
        # reverse same_rank to reduce the chance of trying to pair already
        # matched players
        same_rank.reverse()
        # pair the next player from the same_rank list who has not been
        # matched before.
        for p2 in same_rank:
            p2_id = p2[0]
            p2_name = p2[1]
            if p1_id != p2_id and not ((p1_id, p2_id) in prev_matches or
                                       (p2_id, p1_id) in prev_matches):
                pairs.append((p1_id, p1_name, p2_id, p2_name))
                ps.remove(p2)
                break
            else:
                logger.warn('skipped: %s', (p1_id, p2_id))

    return pairs
Example #8
    def purge(self):
        if self._pid == 0:
            return

        try:
            for event in self._events:
                try:
                    self._scheduler.cancel(event)
                except ValueError:
                    pass
            try:
                os.kill(self._pid, signal.SIGKILL)
            except ProcessLookupError as e:
                logger.warn(e)
        finally:
            self._pid = 0
            self._events.clear()
            logger.info("❌ purge events")
Example #9
    def _save_ckpt(self, ckpt_num, update_iter):
        """
        Save checkpoint to log directory.
        Args:
            ckpt_num: number appended to the checkpoint name. The number of
                environment steps is used in this code.
            update_iter: number of policy updates. It will be used for resuming training.
        """
        ckpt_path = os.path.join(self._config.log_dir,
                                 'ckpt_%08d.pt' % ckpt_num)
        state_dict = {'step': ckpt_num, 'update_iter': update_iter}
        state_dict['agent'] = self._agent.state_dict()
        torch.save(state_dict, ckpt_path)
        logger.warn("Save checkpoint: %s", ckpt_path)

        replay_path = os.path.join(self._config.log_dir,
                                   'replay_%08d.pkl' % ckpt_num)
        with gzip.open(replay_path, 'wb') as f:
            replay_buffers = {'replay': self._agent.replay_buffer()}
            pickle.dump(replay_buffers, f)
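A minimal call sketch matching the docstring above, with the environment step count passed as ckpt_num and the policy-update count as update_iter (the counter names are hypothetical):

        # Hypothetical call from a training loop; `env_steps` and `n_updates`
        # are made-up counters tracked by the caller.
        self._save_ckpt(ckpt_num=env_steps, update_iter=n_updates)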
Example #10
    def evaluate(self):
        step, update_iter, _ = self._load_ckpt(ckpt_num=self._config.ckpt_num)

        logger.info(
            "Run %d evaluations at step=%d, update_iter=%d",
            self._config.num_eval,
            step,
            update_iter,
        )
        info_history = defaultdict(list)
        rollouts = []
        for i in trange(self._config.num_eval):
            logger.warn("Evalute run %d", i + 1)
            rollout, info, vids = self._evaluate(step=step,
                                                 record=self._config.record,
                                                 idx=i)
            for k, v in info.items():
                info_history[k].append(v)
            if self._config.save_rollout:
                rollouts.append(rollout)

        keys = ["episode_success", "reward_goal_dist"]
        os.makedirs("result", exist_ok=True)
        with h5py.File("result/{}.hdf5".format(self._config.run_name),
                       "w") as hf:
            for k in keys:
                hf.create_dataset(k, data=info_history[k])

            result = "{:.02f} $\\pm$ {:.02f}".format(
                np.mean(info_history["episode_success"]),
                np.std(info_history["episode_success"]),
            )
            logger.warn(result)

        if self._config.save_rollout:
            os.makedirs("saved_rollouts", exist_ok=True)
            with open("saved_rollouts/{}.p".format(self._config.run_name),
                      "wb") as f:
                pickle.dump(rollouts, f)
Example #11
File: worker.py Project: bopopescu/lave
    def run(self):

        self.__busy = True
        while self.__busy:
            # Blocks when there is no task, so this thread does not need to sleep itself ...
            task = self.workload.assign_workload()
            logger.info(
                'workload assign task pool size: {0} free count: {1}'.format(
                    self.__pool.size, self.__pool.free_count()))
            if self.__pool.free_count() < 2:
                logger.warn(
                    '[Exception MJOPObserver,type=ex78000,uid=,csuid=,qid={ts},ts={ts},ip={ip},'
                    'refer_id=,cur_id=spider_slave,debug=task backlog - free pool:{free}/{size} - waiting tasks:{count}]'
                    .format(ts=int(time.time() * 1000),
                            ip=local_ip,
                            size=self.__pool.size,
                            free=self.__pool.free_count(),
                            count=self.workload.tasks.qsize()))
            self.__pool.spawn(self.task_entrance, task)

        self.__busy = False

        logger.info("%s stop" % self.thread_name)
Example #12
def get_instagram_data(node_path: str, keyword: str, field: dict):
    for k, v in dict(field).items():
        if not v:
            field.pop(k)

    field = parse.urlencode(field)
    url = base + node_path + keyword + '/?' + field

    try_count = count(0)
    next(try_count)
    while 1:
        logger.info(f'request to "{url}"')
        logger.debug(f'request info : <base : "{base}"> <node_path: {node_path}>'
                     f' <keyword: {keyword}> <encoded_field: {field}>')
        response = requests.get(url)
        logger.debug(f'response status : {response.status_code}')

        try:
            if response.status_code == 200:
                ret = response.json()
                return ret

            else:
                logger.warn('connection error : %s' % response.status_code)
                if next(try_count) > 3:
                    logger.error(f'cannot connect to "{url}"')
                    return None


        except Exception as e:
            print("ERR:", e)
            logger.error('cannot get a response from "%s" (status: %s)', url, response.status_code)
            logger.error('error message: %s', e)
            return None

        next(try_count)
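A hedged usage sketch (the node path, keyword, and query fields are made up) for the request helper above:

# Hypothetical call; assumes `base`, `logger`, and the imports used above are in scope.
data = get_instagram_data("explore/tags/", "python", {"__a": "1", "max_id": ""})
if data is not None:
    logger.info("received %d top-level keys", len(data))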
Example #13
 def _do_simulation(self, a=None):
     try:
         self.data.ctrl[:] = a[:]
         # self.sim.forward()
         self.sim.step()
     except Exception as e:
         logger.warn(
             "[!] Warning: Simulation is unstable. The episode is terminated."
         )
         logger.warn(e)
         logger.warn(traceback.format_exc())
         self.reset()
         self._fail = True
Example #14
 def do_simulation(self, a):
     try:
         mins = self.action_space.minimum
         maxs = self.action_space.maximum
         a = np.maximum(a, mins)
         a = np.minimum(a, maxs)
         self.data.ctrl[:] = a
         self.sim.forward()
         self.sim.step()
     except Exception as e:
         logger.warn(
             '[!] Warning: Simulation is unstable. The episode is terminated.'
         )
         logger.warn(e)
         logger.warn(traceback.format_exc())
         self.reset()
         self._fail = True
Example #15
    def _load_ckpt(self, ckpt_num=None):
        ckpt_path, ckpt_num = get_ckpt_path(self._config.log_dir, ckpt_num)

        if ckpt_path is not None:
            logger.warn("Load checkpoint %s", ckpt_path)
            ckpt = torch.load(ckpt_path)
            self._agent.load_state_dict(ckpt["agent"])

            if self._config.is_train:
                replay_path = os.path.join(self._config.log_dir,
                                           "replay_%08d.pkl" % ckpt_num)
                logger.warn("Load replay_buffer %s", replay_path)
                with gzip.open(replay_path, "rb") as f:
                    replay_buffers = pickle.load(f)
                    self._agent.load_replay_buffer(replay_buffers["replay"])

            return ckpt["step"], ckpt["update_iter"], ckpt["env_step"]
        else:
            logger.warn("Randomly initialize models")
            return 0, 0, 0
Example #16
    def _load_ckpt(self, ckpt_num=None):
        """
        Loads checkpoint with index number @ckpt_num. If @ckpt_num is None,
        it loads and returns the checkpoint with the largest index number.
        """
        ckpt_path, ckpt_num = get_ckpt_path(self._config.log_dir, ckpt_num)

        if ckpt_path is not None:
            logger.warn('Load checkpoint %s', ckpt_path)
            ckpt = torch.load(ckpt_path)
            self._agent.load_state_dict(ckpt['agent'])

            if self._config.is_train:
                replay_path = os.path.join(self._config.log_dir,
                                           'replay_%08d.pkl' % ckpt_num)
                logger.warn('Load replay_buffer %s', replay_path)
                with gzip.open(replay_path, 'rb') as f:
                    replay_buffers = pickle.load(f)
                    self._agent.load_replay_buffer(replay_buffers['replay'])

            return ckpt['step'], ckpt['update_iter']
        else:
            logger.warn('Randomly initialize models')
            return 0, 0
Example #17
File: main.py Project: sts-sadr/furniture
 def shutdown(signal, frame):
     logger.warn('Received signal %s: exiting', signal)
     sys.exit(128+signal)
Example #18
                querys = [s.strip() for s in qf.readlines()]

        except:
            raise

    if args.next_id:
        try:
            logger.info('using next_id preservation.')
            with open(args.next_id, 'r', encoding='utf-8') as nf:
                next_id = {
                    k.strip(): v.strip()
                    for s in nf.readlines() for k, v in (s.split('='), )
                }

        except FileNotFoundError:
            logger.warn('next_id file not found.')

        except:
            raise

    if querys:
        for query in querys:
            next_id[query] = search_query(query,
                                          next_cursor=next_id.get(query, None))

        if args.next_id:
            try:
                logger.info('save next_id for session save.')
                with open(args.next_id, 'w', encoding='utf-8') as nf:
                    for k, v in next_id.items():
                        if v: