Example #1
    def _ParseStaircaseRoom(self, level_num: LevelNum,
                            staircase_room_num: RoomNum) -> None:
        staircase_room = self.data_table.GetRoom(level_num, staircase_room_num)

        if staircase_room.GetType() == RoomType.ITEM_STAIRCASE:
            logging.debug("  Found item staircase %x in L%d " %
                          (staircase_room_num, level_num))
            assert staircase_room.GetLeftExit() == staircase_room.GetRightExit()
            self.data_table.GetRoom(
                level_num,
                staircase_room.GetLeftExit()).SetStaircaseRoomNumber(
                    staircase_room_num)
        elif staircase_room.GetType() == RoomType.TRANSPORT_STAIRCASE:
            logging.debug("  Found transport staircase %x in L%d " %
                          (staircase_room_num, level_num))
            assert staircase_room.GetLeftExit() != staircase_room.GetRightExit()
            for associated_room_num in [
                    staircase_room.GetLeftExit(),
                    staircase_room.GetRightExit()
            ]:
                self.data_table.GetRoom(
                    level_num, associated_room_num).SetStaircaseRoomNumber(
                        staircase_room_num)
        else:
            logging.fatal(
                "Room in staircase room number list (%x) didn't have staircase type (%x)."
                % (staircase_room_num, staircase_room.GetType()))
Example #2
def makeup_multi_case(config_file, s3_path):
    """ run multi case """
    signal.signal(signal.SIGINT, handle_multi_case)
    # fixme: setup with archive path
    if os.path.isdir("log") is False:
        os.makedirs("log")
    if os.path.isdir("tmp_config") is False:
        os.makedirs("tmp_config")

    ret_para = parse_xt_multi_case_paras(config_file)
    config_file_base_name = os.path.split(config_file)[-1]

    for i, para in enumerate(ret_para):
        if i > 9:
            logging.fatal("only support 10 parallel case")
            break

        tmp_config_file = "{}_{}".format(config_file_base_name, i)
        config_file = os.path.join("tmp_config", tmp_config_file)
        write_conf_file(config_file, para)

        abs_config_file = os.path.abspath(config_file)

        log_file = os.path.join("log", "log_{}.log".format(tmp_config_file))

        TRAIN_PROCESS_LIST.append(
            launch_train_with_shell(abs_config_file,
                                    s3_path=s3_path,
                                    stdout2file=log_file))

    while True:
        time.sleep(100)
Example #3
    def _load_episodic_or_batch(self, data_path, repeat):
        if self._mode == "batch":
            # If not episodic, i.e. conventional loading
            files = []
            for alphabet in self._split.split(","):
                path = os.path.join(data_path, alphabet)
                if os.path.isdir(path):
                    file_list = os.listdir(os.path.join(data_path, alphabet))
                    files.extend([os.path.join(data_path, alphabet, c) for c in file_list])
                else:
                    files.append(path)

            return self._create_dataset_from_filepaths(files, repeat)
        elif self._mode == "episodic":
            episodes = []

            for episode_string in self._split.split(";"):
                files = []
                for collection in episode_string.split(","):
                    path = os.path.join(data_path, collection)
                    if os.path.isdir(path):
                        file_list = os.listdir(os.path.join(data_path, collection))
                        files.extend([os.path.join(data_path, collection, file) for file in file_list])
                    else:
                        files.append(path)
                episodes.append(",".join(files))

            return self._create_episodic_dataset_from_nested_filespaths(episodes, self.shot, self.way, repeat)
        else:
            logging.fatal("Dataset mode not \"episodic\" or \"batch\", value supplied: %s", self._mode)
Example #4
def build_optimizer(learning_rate,
                    optimizer_name='rmsprop',
                    decay=0.9,
                    epsilon=0.001,
                    momentum=0.9):
  """Build optimizer."""
  if optimizer_name == 'sgd':
    logging.info('Using SGD optimizer')
    optimizer = tf.compat.v1.train.GradientDescentOptimizer(
        learning_rate=learning_rate)
  elif optimizer_name == 'momentum':
    logging.info('Using Momentum optimizer')
    optimizer = tf.compat.v1.train.MomentumOptimizer(
        learning_rate=learning_rate, momentum=momentum)
  elif optimizer_name == 'rmsprop':
    logging.info('Using RMSProp optimizer')
    optimizer = tf.compat.v1.train.RMSPropOptimizer(learning_rate, decay,
                                                    momentum, epsilon)
  elif optimizer_name == 'adam':
    logging.info('Using Adam optimizer')
    optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate)
  else:
    logging.fatal('Unknown optimizer: %s', optimizer_name)

  return optimizer
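A minimal usage sketch for the factory above; the hyperparameter values and the commented-out TF1-style minimize() call are illustrative assumptions, not part of the original example:

import tensorflow as tf

optimizer = build_optimizer(learning_rate=0.01, optimizer_name='momentum')
# In TF1 graph mode the optimizer would then be wired into the graph, e.g.:
# train_op = optimizer.minimize(loss)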
Example #5
async def _initialize():
    try:
        await asyncio.gather(CardDb.get().initialize(),
                             ManamojiDb.get().initialize(bot))
    except Exception:
        logging.exception('Bot initialization has failed.')
        logging.fatal('Dying due to failed initialization.')
Example #6
def _test_fatal_with_other_threads():
    """Test logging.fatal from main thread, other threads running."""

    lock = threading.Lock()
    lock.acquire()

    def sleep_forever(lock=lock):
        v = VerboseDel('fatal_with_other_threads non-main del called\n')
        try:
            lock.release()
            while True:
                time.sleep(10000)
        finally:
            del v

    v = VerboseDel('fatal_with_other_threads main del called\n')
    try:
        # Start new thread
        t = threading.Thread(target=sleep_forever)
        t.start()

        # Wait for other thread
        lock.acquire()
        lock.release()

        # Die
        logging.fatal('fatal_with_other_threads message')
        while True:
            time.sleep(10000)
    finally:
        del v
Example #7
def get_model():
    model_name = FLAGS.model_name

    if model_name == 'lda':
        from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
        return LinearDiscriminantAnalysis()

    if model_name == 'lr':
        from sklearn.linear_model import LogisticRegression
        return LogisticRegression()

    if model_name == 'svm':
        from sklearn.svm import LinearSVC
        dual = (FLAGS.input_name == '0_original')
        return LinearSVC(dual=dual, max_iter=100000)

    if model_name == 'nb':
        from sklearn.naive_bayes import BernoulliNB
        return BernoulliNB()

    if model_name == 'ft':
        from sklearn.tree import DecisionTreeClassifier
        return DecisionTreeClassifier()

    if model_name == 'rndfor':
        from sklearn.ensemble import RandomForestClassifier
        return RandomForestClassifier()

    if model_name == 'elasticnet':
        from sklearn.linear_model import ElasticNet
        return ElasticNet()

    logging.fatal(f'Unknown model {model_name}!')
Example #8
def _package_xt(default_dist_path="./dist"):
    """
    Make a package as a wheel with `python3 setup.py bdist_wheel`.

    :param default_dist_path:
    :return:
    """
    # # remove old zeus
    # if os.path.exists("zeus"):
    #     shutil.rmtree("zeus")

    if not os.path.exists("zeus"):
        shutil.copytree("../zeus",
                        "zeus",
                        ignore=shutil.ignore_patterns('*.pyc'))
        sleep(0.05)

    _cmd = "python3 setup.py bdist_wheel --universal"
    try:
        # check_call (unlike subprocess.call) raises CalledProcessError on a
        # non-zero exit status, so the handler below can actually fire.
        subprocess.check_call(_cmd, shell=True)
    except subprocess.CalledProcessError as err:
        logging.fatal("catch err: {} when package into wheel".format(err))

    return default_dist_path
Example #9
def get_dat(t):
    if FLAGS.debug:
        if not FLAGS.save_json:
            logging.fatal(
                'Need to specify name of the json file with --save_json')
        # Fetch test convertible-bond data.
        with open(FLAGS.save_json, 'r', encoding='utf-8') as jf:
            return json.loads(jf.read())
    else:
        # Exclude bonds that are not yet listed.
        payload = {'listed': 'Y'}
        newUrl = 'https://www.jisilu.cn/data/cbnew/cb_list/?___jsl=LST___t=%s' % int(
            t * 1000)
        logging.info(newUrl)
        # The simplest possible crawler request; a headers field can also be
        # added to get around the anti-crawler checks of some sites.
        #  headers = {
        #  'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36',
        #  }
        response = requests.post(newUrl, data=payload)
        # When the page being crawled requires a username/password login, the
        # request needs to include an auth field.
        data = response.content.decode('utf-8')
        if FLAGS.save_json:
            with open(FLAGS.save_json, 'w', encoding='utf-8') as jf:
                jf.write(data)
        return json.loads(data)
Example #10
  def read(self, manifest: str):
    """Reads in a single manifest file and records mapping.

    Args:
      manifest: path to manifest file.
    """
    with open(manifest) as f:
      for line in f:
        file_map = line.split()

        file_name = file_map[0]

        if len(file_map) == 1:
          self._all_files.append(file_name)
          self._mapped_files[file_name] = None

          logging.debug(' - %s - copy unique', file_name)
        elif len(file_map) == 2:
          file_mapped_name = file_map[1]

          self._all_files.append(file_name)
          self._mapped_files[file_name] = file_mapped_name

          self._global_manifest.add_mapping(file_name, file_mapped_name,
                                            manifest)

          logging.debug(' - %s - copy mapped - %s', file_name, file_mapped_name)
        else:
          logging.fatal('Unexpected line in manifest %s - %s', manifest, line)
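A toy manifest (file names made up) showing the two line shapes read() accepts: a single column marks a unique copy, two columns mark a mapped copy:

manifest_text = "a.txt\nb.txt b_renamed.txt\n"
for line in manifest_text.splitlines():
    print(line.split())
# ['a.txt']
# ['b.txt', 'b_renamed.txt']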
Example #11
def process_config() -> flags.FlagValues:
    config = flags.FLAGS

    # Process running experiment name.
    if (config.mode == "evaluate"
            or config.mode == "restore") and config.name == "auto":
        logging.fatal(
            "Cannot automatically generate a name for the chosen mode.")
    elif config.mode == "train" and config.name == "auto":
        config.name = os.path.join(time.strftime("%Y-%m-%d"),
                                   namegenerator.gen())
        logging.info("Experiment Name: {}".format(config.name))

    # Convert shapes to tuples.
    config.input_shape = tuple(config.input_shape)
    config.output_shape = tuple(config.output_shape)

    # Set directories to their appropriate paths.
    config.execution_dir = os.path.join(os.curdir, "executions", config.name)
    config.checkpoint_dir = os.path.join(config.execution_dir, "checkpoint")
    config.evaluate_dir = os.path.join(config.execution_dir, "result")
    config.log_dir = os.path.join(config.execution_dir, "log")
    config.presentation_dir = os.path.join(config.execution_dir,
                                           "presentation")
    config.summary_dir = os.path.join(config.execution_dir, "summary")

    create_directories(config,
                       directories=(config.checkpoint_dir, config.evaluate_dir,
                                    config.log_dir, config.presentation_dir,
                                    config.summary_dir))

    # Log out the command for using TensorBoard.
    print('tensorboard --logdir="{}" --host 127.0.0.1 --port 8080'.format(
        os.path.abspath(config.summary_dir)))
    return config
Example #12
    def analyze(self, text: Union[str, List[str]]) -> Dict[str, Any]:
        """Analyze the tagging result of given text predicted by the ner model and return the
        result in pretty format with detailed information.

        Args:
            text: str or List of str. Can be a un-tokenized text, like ``'我在上海上学'`` or a
                tokenized (in char level) text sequence, like ``['我', '在', '上', '海', '上', '学']``.

        Returns:
            A Dict including the original text and list of recognized entities with detailed
            information (name, type, score, offset). Specifically, it will be like:
            {'text': '我在上海上学',
             'entities': [{'name': '上海',
                           'type': 'LOC',
                           'score': 0.9986118674278259,
                           'beginOffset': 2,
                           'endOffset': 4
                           }]
            }

        Notes:
            The score of an entity is the probability of it being a named entity, computed by
            averaging the probabilities, as predicted by the NER model, of all the tokens within
            the entity. However, if a CRF layer is used as the last layer of the NER model, the
            score will always be 1, because the Viterbi algorithm used by the CRF outputs a
            single best path instead of a probability distribution.

        """
        if self.predictor:
            return self.predictor.pretty_tag(text)
        else:
            logging.fatal(
                'Predictor is None! Call fit() or load() to get predictor.')
Example #13
  def _GetRitoLib(self):
    api_key = (self._params.riot.api_key or
               self._core.store.GetValue('riot_api_key', 'api_key'))
    if not api_key:
      logging.fatal('Rito API key failed to load.')
    channel = grpc.insecure_channel(self._params.riot.api_address)
    return rito_lib.RitoLib(self._core.proxy, channel, api_key)
Example #14
  def SetRoomAction(self, room_action: RoomAction) -> None:
    self.room_action = room_action
    if room_action == RoomAction.NO_ROOM_ACTION:
      self._SetRomBits(3, 0x40, 0x00)
      self._SetRomBits(5, 0x07, 0x00)
    elif room_action == RoomAction.KILLING_ENEMIES_OPENS_SHUTTER_DOORS:
      self._SetRomBits(3, 0x40, 0x00)
      self._SetRomBits(5, 0x07, 0x01)
    elif room_action == RoomAction.MASTER_ENEMY:
      self._SetRomBits(3, 0x40, 0x00)
      self._SetRomBits(5, 0x07, 0x02)
    elif room_action == RoomAction.KILLING_THE_BEAST_OPENS_SHUTTER_DOORS:
      self._SetRomBits(3, 0x40, 0x00)
      self._SetRomBits(5, 0x07, 0x03)
    elif room_action == RoomAction.PUSHABLE_BLOCK_OPENS_SHUTTER_DOORS:
      self._SetRomBits(3, 0x40, 0x01)
      self._SetRomBits(5, 0x07, 0x04)
    elif room_action == RoomAction.PUSHABLE_BLOCK_MAKES_STAIRS_APPEAR:
      self._SetRomBits(3, 0x40, 0x01)
      self._SetRomBits(5, 0x07, 0x05)
    elif room_action == RoomAction.KILLING_ENEMIES_OPENS_SHUTTER_DOORS_AND_DROPS_ITEM:
      self._SetRomBits(3, 0x40, 0x00)
      self._SetRomBits(5, 0x07, 0x07)
    elif room_action == RoomAction.PUSHABLE_BLOCK_HAS_NO_EFFECT:
      self._SetRomBits(3, 0x40, 0x01)
      self._SetRomBits(5, 0x07, 0x00)
    elif room_action == RoomAction.KILLING_ENEMIES_OPENS_SHUTTER_DOORS_DROPS_ITEM_AND_MAKES_BLOCK_PUSHABLE:
      self._SetRomBits(3, 0x40, 0x01)
      self._SetRomBits(5, 0x07, 0x07)
    elif room_action == RoomAction.EXPERIMENTAL_6:
      self._SetRomBits(3, 0x40, 0x00)
      self._SetRomBits(5, 0x07, 0x06)
    else:
      log.fatal("Found undefined room action code: %d" % int(room_action))
      sys.exit(1)
Example #15
def main(argv):
  del argv  # Unused.
  cmd_key = 'command_{}'.format(FLAGS.cmd)
  cmd_func = globals().get(cmd_key, None)
  if not cmd_func:
    logging.fatal('Command `%s` is not supported.', FLAGS.cmd)
  cmd_func()
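The pattern above relies on absl-py's logging.fatal, which logs the message and then terminates the process, so cmd_func() is never reached when the command is unknown. A small sketch of the contrast with the standard library (the absl call is left commented out so the snippet runs to completion):

import logging as std_logging

# Stdlib: logging.fatal is just an alias for logging.critical and returns,
# so execution continues past it.
std_logging.fatal('stdlib fatal message')

# absl-py: logging.fatal logs at FATAL severity and then aborts the process.
# from absl import logging
# logging.fatal('absl fatal message')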
Example #16
def _x_limits(signal):
  """Computes the x-axis region to be displayed.

  Args:
     signal: Input signal.

  Returns:
     A tuple consisting of left and right limits.
  """
  xlim_left = 0
  xlim_right = signal.shape[1] - 1
  if FLAGS.x_axis_end >= signal.shape[1]:
    logging.fatal("Invalid time region: [%d, %d]",
                  FLAGS.x_axis_start, FLAGS.x_axis_end)
  elif FLAGS.x_axis_end > 0 and FLAGS.x_axis_end <= FLAGS.x_axis_start:
    logging.fatal("Invalid right rimit: [%d, %d]",
                  FLAGS.x_axis_start, FLAGS.x_axis_end)
  if FLAGS.x_axis_start > 0 and FLAGS.x_axis_end > 0:
    xlim_left = FLAGS.x_axis_start
    xlim_right = FLAGS.x_axis_end
  elif FLAGS.x_axis_start > 0:
    xlim_left = FLAGS.x_axis_start
  elif FLAGS.x_axis_end > 0:
    xlim_right = FLAGS.x_axis_end
  return xlim_left, xlim_right
Example #17
    def _analyze(self, reference_actions):
        """ Runs the reference actions and returns the sequence of:
            1. Observations encountered by the reference_actions.
            2. Undiscounted Returns from each step.
            3. Physics States encountered by the reference actions.
        """
        time_step = self.env.reset()
        obs = np.concatenate(
            [time_step.observation[o].squeeze() for o in self.observables])
        observations, physics_data, rewards = [], [], []
        observations.append(np.copy(obs))

        for idx, act in enumerate(reference_actions):
            physics_data.append(self.env.physics.data.deepcopy())
            time_step = self.env.step(act)
            obs = np.concatenate(
                [time_step.observation[o].squeeze() for o in self.observables])
            rewards.append(time_step.reward)
            observations.append(np.copy(obs))
            if self.env.task._should_truncate:
                logging.fatal(f'Ref_action validation failed at step {idx}')
            if time_step.last():
                break

        self.ref_rewards = rewards
        returns = np.cumsum(rewards[::-1])[::-1]
        return observations, returns, physics_data
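A worked instance (rewards made up) of the reversed-cumsum trick above, which turns per-step rewards into undiscounted returns-to-go:

import numpy as np

rewards = [1.0, 2.0, 3.0]
returns = np.cumsum(rewards[::-1])[::-1]
print(returns)  # [6. 5. 3.] -- each entry sums the rewards from that step on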
Example #18
    def __init__(self, rnn_output_size, num_mixtures, rnn_model, cell_configs):
        """
        Autoregressive RNN Sequence decoder that outputs a sequence of points and pen states that constitute a sketch.

        Trained with teacher forcing at train time: the input at each timestep is the ground-truth data.
        During test time, we sample from the output distribution to obtain the input for the next timestep.
        :param rnn_output_size:
        :param num_mixtures:
        :param rnn_model:
        :param cell_configs:
        """
        super(DecoderDefault, self).__init__()

        self._rnn_model = rnn_model
        self._rnn_output_size = rnn_output_size
        self._num_mixtures = num_mixtures

        self._cell_configs = cell_configs

        if self._rnn_model == "hyper":
            self.cell = HyperLSTMCell(
                self._rnn_output_size,
                hyper_num_units=self._cell_configs["hyper_num_units"],
                hyper_embedding_size=self.
                _cell_configs["hyper_embedding_size"],
                use_recurrent_dropout=self.
                _cell_configs["use_recurrent_dropout"],
                recurrent_dropout_prob=self.
                _cell_configs["recurrent_dropout_prob"])
        else:
            logging.fatal("Invalid RNN Cell Selection: %s", self._rnn_model)
Example #19
    def __init__(
        self,
        kernel_shape,
        multiplier: multiplier_impl.IMultiplier,
    ):
        super().__init__()

        if len(kernel_shape) not in (2, 4):
            logging.fatal("unsupported kernel shape: it is neither a dense "
                          "kernel of length 2 nor a convolution kernel of "
                          "length 4")

        kernel_shape_excluding_output_dim = kernel_shape[:-1]
        kernel_add_ops = np.prod(kernel_shape_excluding_output_dim)

        # Biases are associated with filters; each filter adds 1 bias.
        bias_add = 1

        add_ops = kernel_add_ops + bias_add
        self.log_add_ops = int(np.ceil(np.log2(add_ops)))

        self.multiplier = multiplier
        self.output = quantizer_impl.QuantizedBits()
        self.output.bits = self.log_add_ops + self.multiplier.output.bits
        self.output.int_bits = self.log_add_ops + self.multiplier.output.int_bits
        self.output.is_signed = self.multiplier.output.is_signed
        self.output.op_type = "accumulator"

        assert not self.multiplier.output.is_floating_point
        self.output.is_floating_point = False
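A worked instance of the accumulator sizing above, for a hypothetical 3x3 convolution kernel with 64 input channels and 128 filters (the shape is made up for illustration):

import numpy as np

kernel_shape = (3, 3, 64, 128)               # [h, w, in_channels, filters]
kernel_add_ops = np.prod(kernel_shape[:-1])  # 3 * 3 * 64 = 576
add_ops = kernel_add_ops + 1                 # plus one bias add per filter
print(int(np.ceil(np.log2(add_ops))))        # 10 extra accumulator bits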
Example #20
def _test_fatal_main_thread_only():
    """Test logging.fatal from main thread, no other threads running."""
    v = VerboseDel('fatal_main_thread_only main del called\n')
    try:
        logging.fatal('fatal_main_thread_only message')
    finally:
        del v
Example #21
def _handle_errors(errors):
    """Log out and possibly reraise errors during import."""
    if not errors:
        return
    for name, err in errors:
        logging.warning("Module {} import failed: {}".format(name, err))
    logging.fatal("Please check these modules.")
Example #22
    def files(self, split):
        """Returns set containing paths to TFDS TFRecord files for the dataset."""
        self.verify_split(split)
        files = set()

        def _get_builder_files(builder):
            split_info = builder.info.splits[split]
            if builder.version.implements(tfds.core.Experiment.S3):
                num_shards = len(split_info.shard_lengths)
            else:
                num_shards = split_info.num_shards
            return tfds.core.naming.filepaths_for_dataset_split(
                dataset_name=builder.name,
                split=split_info.name,
                num_shards=num_shards,
                data_dir=builder._data_dir,  # pylint:disable=protected-access
                filetype_suffix="tfrecord",
            )

        if self.builder.BUILDER_CONFIGS and "/" not in self.name:
            # If builder has multiple configs, and no particular config was
            # requested, then compute all.
            for config in self.builder.BUILDER_CONFIGS:
                builder_for_config = tfds.builder(self.builder.name,
                                                  config=config)
                files.update(_get_builder_files(builder_for_config))
        else:
            files.update(_get_builder_files(self.builder))

        if not files:
            logging.fatal("No TFRecord files found for dataset: %s", self.name)
        return files
Example #23
def main(_):
    if not os.path.isfile(FLAGS.meta_path):
        logging.fatal(f"file {FLAGS.meta_path} not exits")

    with codecs.open(FLAGS.meta_path, 'r', 'utf-8') as f:
        content = f.read()

    FLAGS.retain_list = FLAGS.retain_list or ["python", "node"]

    try:
        meta = json.loads(content)
        logging.info(f"meta size: {len(meta)}", )
    except Exception as e:
        logging.fatal(f"load file {FLAGS.meta_path} failed {e}")

    with multiprocessing.Pool(processes=4) as pool:
        func = partial(collectPathInfo, FLAGS.retain_list, FLAGS.clean)
        results = pool.map(func, meta.items())

    n_root = Node("root")
    with codecs.open(FLAGS.result_path, 'w', 'utf-8') as f:
        for r in results:
            r.parent = n_root
        if FLAGS.show_stat:
            print(RenderTree(n_root).by_attr())
        print(RenderTree(n_root).by_attr(), file=f)
Example #24
    def _make_episode_generator(self, episodes, shot, way, npz_collections):
        for episode_classes_string in episodes:
            episode_classes_list = episode_classes_string.decode('utf-8').split(",")
            episode_classes = random.sample(list(episode_classes_list), way)

            support = [[] for _ in range(len(npz_collections))]
            query = [[] for _ in range(len(npz_collections))]
            for class_file in episode_classes:
                try:
                    npz = np.load(class_file, allow_pickle=True, encoding='latin1')
                except FileNotFoundError as error:
                    logging.fatal("Shard not found when producing generator fn: %s", class_file)
                    raise error

                collections = [npz[key.decode('utf-8')] for key in npz_collections]

                sample_idxs = np.linspace(0., float(len(collections[0])-1), len(collections[0])).astype(np.int32)
                np.random.shuffle(sample_idxs)

                for idx, collection in enumerate(collections):
                    support[idx].append(collection[sample_idxs[:shot]])

                for idx, collection in enumerate(collections):
                    query[idx].append(collection[sample_idxs[shot:]])

            yield tuple([np.concatenate(x, axis=0) for x in support] + [np.concatenate(x, axis=0) for x in query])
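A toy illustration (numbers made up) of the per-class support/query split performed above: the first 'shot' shuffled indices feed the support set and the remainder the query set:

import numpy as np

shot = 1
sample_idxs = np.arange(10)   # 10 samples in a hypothetical class shard
np.random.shuffle(sample_idxs)
support_idxs = sample_idxs[:shot]
query_idxs = sample_idxs[shot:]
print(len(support_idxs), len(query_idxs))  # 1 9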
Example #25
def GetUserEntropyOnce(bit_num):
    if FLAGS.user_source == "coin":
        return GetCoinFlip(bit_num)
    elif FLAGS.user_source == "dice":
        return GetDiceRoll(bit_num)
    else:
        logging.fatal("Invalid --user_source.")
Example #26
def build_optimizer(learning_rate,
                    optimizer_name='rmsprop',
                    decay=0.9,
                    epsilon=0.001,
                    momentum=0.9,
                    lars_weight_decay=None,
                    lars_epsilon=None):
    """Build optimizer."""
    if optimizer_name == 'sgd':
        logging.info('Using SGD optimizer')
        optimizer = tf.train.GradientDescentOptimizer(
            learning_rate=learning_rate)
    elif optimizer_name == 'momentum':
        logging.info('Using Momentum optimizer')
        optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate,
                                               momentum=momentum)
    elif optimizer_name == 'rmsprop':
        logging.info('Using RMSProp optimizer')
        optimizer = tf.train.RMSPropOptimizer(learning_rate, decay, momentum,
                                              epsilon)
    elif optimizer_name == 'lars':
        logging.info('Using LARS optimizer')
        assert lars_weight_decay is not None, 'LARS weight decay is None.'
        assert lars_epsilon is not None, 'LARS epsilon is None.'
        optimizer = lars_optimizer.LARSOptimizer(
            learning_rate,
            momentum=momentum,
            weight_decay=lars_weight_decay,
            skip_list=['batch_normalization', 'bias', 'beta', 'gamma'],
            epsilon=lars_epsilon)
    else:
        logging.fatal('Unknown optimizer: %s', optimizer_name)

    return optimizer
Example #27
def GetEntropy():
    """Returns an integer containing all the requested entropy."""
    entropy = 0
    if not FLAGS.use_user_entropy and not FLAGS.use_system_entropy:
        logging.fatal("No entropy source! Must enable --use_user_entropy \
        and/or --use_system_entropy.")
    if FLAGS.use_user_entropy:
        user_bits = GetUserBits(FLAGS.entropy)
        if FLAGS.verbose:
            print("User generated entropy:   {}".format(
                NumToBitString(user_bits)))
            print("                          {}".format(hex(user_bits)))
        entropy ^= user_bits
    if FLAGS.use_system_entropy:
        system_bits = secrets.randbits(FLAGS.entropy)
        if FLAGS.verbose:
            print("System generated entropy: {}".format(
                NumToBitString(system_bits)))
            print("                          {}".format(hex(system_bits)))
        entropy ^= system_bits
    # Print combined bits if it provides any additional insight.
    if FLAGS.use_user_entropy and FLAGS.use_system_entropy and FLAGS.verbose:
        # Just so indent matches above...
        if True:
            print("Combined entropy:         {}".format(
                NumToBitString(entropy)))
            print("                          {}".format(hex(entropy)))
    return entropy
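A toy illustration (bit values made up) of the XOR combination used above: as long as at least one source is uniformly random, the XOR of the two is uniformly random as well:

user_bits = 0b1011
system_bits = 0b0110
combined = user_bits ^ system_bits
print(bin(combined))  # 0b1101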
Example #28
    def __init__(self,
                 level_num: LevelNum = LevelNum.NO_LEVEL_NUM,
                 cave_type: CaveType = CaveType.NO_CAVE_TYPE,
                 room_num: Optional[RoomNum] = None,
                 position_num: Optional[int] = None):
        self.level_num_or_cave_type: LevelNumOrCaveType
        self.sub_id: RoomOrPositionNum

        if level_num != LevelNum.NO_LEVEL_NUM:
            assert level_num in Range.VALID_LEVEL_NUMBERS
            assert room_num in Range.VALID_ROOM_NUMBERS
            assert cave_type is CaveType.NO_CAVE_TYPE
            assert position_num is None
            self.level_num_or_cave_type = level_num
            self.sub_id = RoomOrPositionNum(room_num)

        elif cave_type is not CaveType.NO_CAVE_TYPE:
            assert cave_type in Range.VALID_CAVE_TYPES
            assert position_num in Range.VALID_CAVE_POSITION_NUMBERS
            assert level_num is LevelNum.NO_LEVEL_NUM
            assert room_num is None
            self.level_num_or_cave_type = cave_type
            self.sub_id = RoomOrPositionNum(position_num)

        else:
            log.fatal("Location: level or cave number must be specified")
Example #29
def _time_smooth(signal):
  """Smoothes the input signal usually given in samples in time.

  In order to achieve this the input sample rate is required.

  Args:
     signal: Input signal in samples.

  Returns:
     Time-smoothed signal.
  """
  if FLAGS.input_sample_rate <= 0:
    logging.fatal("Invalid sampling rate: %d", FLAGS.input_sample_rate)
  samples_in_frame = int(
    _TIME_SMOOTHING_FRAME_LENGTH_SEC * FLAGS.input_sample_rate + 0.5)
  logging.info("Frame length: %d samples", samples_in_frame)
  num_frames = int(signal.shape[1] / samples_in_frame)
  logging.info("Duration: %d frames", num_frames)
  smoothed_signal = np.zeros(shape=(signal.shape[0], num_frames), dtype=np.float32)
  signal_offset = 0
  for n in range(num_frames):
    smoothed_signal[:, n] = np.average(
      signal[:, signal_offset:signal_offset + samples_in_frame], axis=1)
    signal_offset += samples_in_frame
  return smoothed_signal
Example #30
def main(argv):
    """Entry point for prometheus_speedtest.py."""
    del argv  # unused
    if FLAGS.version:
        print('prometheus_speedtest v%s' % version.VERSION)
        return

    registry = core.CollectorRegistry(auto_describe=False)
    if FLAGS.servers and set(FLAGS.excludes).issuperset(FLAGS.servers):
        logging.fatal(
            '--excludes is a superset of --servers, no viable test server is '
            'possible. Ensure --excludes does not contain all --servers.')

    registry.register(
        RemoteSpeedtestCollector(servers=FLAGS.servers,
                                 excludes=FLAGS.excludes,
                                 remotes=FLAGS.remotes))

    metrics_handler = SpeedtestMetricsHandler.factory(registry)

    http = server.ThreadingHTTPServer((FLAGS.address, FLAGS.port),
                                      metrics_handler)

    logging.info('Starting HTTP server listening on %s:%s', FLAGS.address,
                 FLAGS.port)
    http.serve_forever()
Example #31
def main(args):
  FLAGS(args)
  if FLAGS.verbose:
    logging.basicConfig(level=logging.INFO)
  if FLAGS.debug:
    logging.basicConfig(level=logging.DEBUG)
  logging.debug('binary: %s\noptimize: %d\nbase_directory: %s\n'
                'policy_file: %s\nrendered_acl_directory: %s',
                str(sys.argv[0]),
                int(FLAGS.optimize),
                str(FLAGS.base_directory),
                str(FLAGS.policy_file),
                str(FLAGS.output_directory))

  definitions = None
  try:
    definitions = naming.Naming(FLAGS.definitions_directory)
  except naming.NoDefinitionsError:
    err_msg = 'bad definitions directory: %s' % FLAGS.definitions_directory
    logging.fatal(err_msg)
    sys.exit(1)

  # thread-safe list for storing files to write
  manager = multiprocessing.Manager()
  write_files = manager.list()

  with_errors = False
  if FLAGS.policy_file:
    # render just one file
    logging.info('rendering one file')
    RenderFile(FLAGS.policy_file, FLAGS.output_directory, definitions,
               FLAGS.exp_info, write_files)
  else:
    # render all files in parallel
    logging.info('finding policies...')
    pols = []
    pols.extend(DescendRecursively(FLAGS.base_directory, FLAGS.output_directory,
                                   definitions))

    pool = multiprocessing.Pool(processes=FLAGS.max_renderers)
    results = []
    for x in pols:
      results.append(pool.apply_async(RenderFile,
                                      args=(x.get('in_file'),
                                            x.get('out_dir'),
                                            definitions,
                                            FLAGS.exp_info,
                                            write_files)))
    pool.close()
    pool.join()

    for result in results:
      try:
        result.get()
      except (ACLParserError, ACLGeneratorError) as e:
        with_errors = True
        logging.warning('\n\nerror encountered in rendering process:\n%s\n\n', e)

  # actually write files to disk
  WriteFiles(write_files)

  if with_errors:
    logging.warning('done, with errors.')
    sys.exit(1)
  else:
    logging.info('done.')