Example #1
    def __init__(self):
        self.time_connect_stats = Statistics()
        self.time_total_stats = Statistics()
        self.n_results = 0
        self.n_fatal_errors = 0

        self.n_attempts = 0
        self.n_429 = 0
        self.n_errors = 0
Example #2
    def __init__(self):
        self.time_connect_stats = Statistics()
        self.time_total_stats = Statistics()
        self.n_results = 0
        self.n_fatal_errors = 0

        self.n_attempts = 0
        self.n_429 = 0
        self.n_errors = 0

        self.n_input_queries = 0
        self.n_extracted_queries = 0  # Queries answered without any type of error
        self.n_query_responses = 0
        self.n_billable_query_responses = 0  # Some errors are also billed
Example #3
 def __init__(self, metric_funcs):
     self.metrics = {metric: Statistics() for metric in metric_funcs}
     self.metrics_values = {
         metric: [[] for _ in range(11)]
         for metric in metric_funcs
     }
     self.metric_funcs = metric_funcs
Example #4
    def __init__(self, actions, epochs=3, batch_size=128, update_num=256,
                    gamma_e=0.99, gamma_i=0.99, lambda_=.95, learning_rate=0.0003,
                    grad_clip=0.5, **kargs):
        super(Trainer, self).__init__()

        self.agent_ppo = ppo_intris_value(actions, **kargs)
        self.agent_rnd = random_network_distilation(**kargs)

        self.epochs = epochs
        self.batch_size = batch_size
        self.update_num = update_num
        self.gamma_e = gamma_e
        self.gamma_i = gamma_i
        self.lambda_ = lambda_
        self.grad_clip = grad_clip
        self.init_replay_buffer = {
            'states': [],
            'next_states': [],
            'logits': [],
            'actions': [],
            'values_e': [],
            'values_i': [],
            'rewards_e': [],
            'rewards_i': [],
            'rewards_i_raw': [],
            'dones': [],
        }
        self.replay_buffer = deepcopy(self.init_replay_buffer)
        self.running_stats = Statistics()
        self.optimizer = tf.keras.optimizers.Adam(learning_rate, epsilon=1e-10)
Example #5
def simulate(QUEUES, SERVERS):
    x = []
    y = []

    # we want to test the system while changing lambda
    for l in range(1, POINTS, 1):

        lambd = 2.0 * SERVERS * QUEUES * mu * l / POINTS  # sweep the arrival rate up to twice the total service capacity

        env = simpy.Environment()

        # save stats
        env.stats = Statistics()

        # queues
        env.queues = [Servers(env, SERVERS, mu) for i in range(0, QUEUES)]

        # start the arrival process
        env.process(arrival(env, lambd))

        # simulate until SIM_TIME
        env.run(until=SIM_TIME)

        # print the mean response time
        #print("%.3f %.3f %.2f" % (mu, lambd, env.stats.mean()))

        x.append(lambd / (SERVERS * QUEUES * mu))
        y.append(env.stats.mean())

    return x, y
Example #6
 def __init__(self, metric_funcs):
     """
     Args:
         metric_funcs (dict): A dict where the keys are metric names and the
             values are Python functions for evaluating that metric.
     """
     self.metrics = {metric: Statistics() for metric in metric_funcs}
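
A minimal usage sketch of the pattern above, assuming the runstats Statistics API (push, mean, stddev) that these snippets appear to use; the metric functions, names, and sample values below are hypothetical and not taken from any of the original projects:

from runstats import Statistics

# hypothetical per-example metric functions on scalar target/prediction pairs
metric_funcs = {
    'abs_err': lambda target, pred: abs(target - pred),
    'sq_err': lambda target, pred: (target - pred) ** 2,
}
metrics = {metric: Statistics() for metric in metric_funcs}

def push_example(target, pred):
    # accumulate one example's value into each metric's running Statistics
    for metric, func in metric_funcs.items():
        metrics[metric].push(func(target, pred))

push_example(1.0, 0.8)
push_example(2.0, 2.3)

# running mean and sample standard deviation per metric
for metric, stat in metrics.items():
    print(metric, stat.mean(), stat.stddev())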
Example #7
    def __init__(self,
                 fnames,
                 labels,
                 root_dir,
                 train=True,
                 mean=None,
                 std=None):
        self.fnames = fnames
        self.labels = labels
        self.melspec_dir = root_dir
        self.mean = mean
        self.std = std

        self.fnames = [
            os.path.splitext(os.path.basename(fname))[0]
            for fname in self.fnames
        ]
        self.fnames = [
            self.melspec_dir + '/' + fname + '.npy' for fname in self.fnames
        ]

        self.transform = None
        self.pil = transforms.ToPILImage()
        if train:
            self.transform = albumentations_transform

        self._find_min_width()

        if self.mean is None:
            self.stats = Statistics()
            self._update_stats()
            self.mean = self.stats.mean()
            self.std = self.stats.stddev()
Example #8
 def __init__(self, initial_states=None, use_stats=True):
     self.predictor_states = initial_states
     self.trgt_sentence = []
     self.score, self.base_score = 0.0, 0.0
     self.score_breakdown = []
     self.word_to_consume = None
     self.statistics = Statistics() if use_stats else None
Example #9
def main():
    parser = argparse.ArgumentParser(description='NAS E2E Runs')
    parser.add_argument('--logdir',
                        type=str,
                        default='D:\\logdir\\azure\\random_cifar_test',
                        help='folder with logs')
    args, extra_args = parser.parse_known_args()

    lines = []
    top1s = []
    for filepath in pathlib.Path(args.logdir).rglob('logs.log'):
        epoch = 0
        for line in pathlib.Path(filepath).read_text().splitlines():
            if '[eval_test] Epoch: [  1/1] ' in line:
                # keep one Statistics per epoch, shared across all log files
                if epoch >= len(top1s):
                    top1s.append(Statistics())
                top1 = float(line.strip().split('(')[-1].split(',')[0].split(
                    '%')[0].strip()) / 100.0
                lines.append(f'{epoch}\t{top1}\t{str(filepath)}')
                top1s[epoch].push(top1)
                epoch += 1
    pathlib.Path(os.path.join(args.logdir,
                              'summary.tsv')).write_text('\n'.join(lines))

    stat_lines = ['epoch\tmean\tstddev\tcount']
    for i, top1 in enumerate(top1s):
        stat_lines.append(
            f'{i}\t{top1.mean()}\t{top1.stddev() if len(top1)>1 else float("NaN")}\t{len(top1)}'
        )
    pathlib.Path(os.path.join(args.logdir, 'summary_stats.tsv')).write_text(
        '\n'.join(stat_lines))
Example #10
 def make_stats_array_recursive(shape):
     if len(shape) == 0:
         return Statistics()
     else:
         return [
             make_stats_array_recursive(shape[1:])
             for _ in range(shape[0])
         ]
Example #11
 def __init__(self, metric_funcs, name=None):
     self.metrics = {
         metric: Statistics() for metric in metric_funcs
     }
     self.values = {
         metric: [] for metric in metric_funcs
     }
     self.metric_funcs = metric_funcs
     self.name = name
Example #12
    def __init__(self):

        # variables of our model: the queue of "clients"
        self.waiting_line = Queue()

        # whether the server is idle or not
        self.idle = True

        # Statistics of the waiting time
        self.response_time = Statistics()
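
The comments above describe a single-server model where response_time accumulates waiting times. A small hedged sketch of how such a Statistics object could be fed and summarized, using made-up arrival and service-start times rather than values from the original simulation:

from runstats import Statistics

response_time = Statistics()

# hypothetical clients: (arrival time, time service started)
clients = [(1.0, 5.0), (4.0, 7.0), (9.0, 12.0)]
for arrived, started in clients:
    # waiting time spent in the queue before service
    response_time.push(started - arrived)

print(response_time.mean(), response_time.stddev())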
Example #13
 def __init__(self, env: GoalEnv, skill_reset_steps: int):
     super().__init__(env)
     self._skill_reset_steps = skill_reset_steps
     self._skill_dim = env.observation_space["desired_goal"].shape[0]
     obs_dim = self.env.observation_space["observation"].shape[0]
     self.observation_space = gym.spaces.Box(-np.inf, np.inf, shape=(obs_dim + self._skill_dim, ))
     self.strategy = MVNStrategy(skill_dim=self._skill_dim)
     self._cur_skill = self.strategy.sample_skill()
     self._last_dict_obs = None
     self._goal_deltas_stats = [Statistics([1e-6]) for _ in range(self._skill_dim)]
Example #14
 def __init__(self,
              want_max=True,
              want_mean=True,
              want_stdev=True,
              want_min=True):
     self.stat = Statistics()
     self.want_max = want_max
     self.want_mean = want_mean
     self.want_stdev = want_stdev
     self.want_min = want_min
Example #15
def add_timing(name: str, elapsed: float, no_print=True) -> Statistics:
    global _timings
    stats = _timings.get(name, None)
    if stats is None:
        stats = Statistics()
        _timings[name] = stats
    stats.push(elapsed)

    if not no_print:
        logging.info('Timing "{}": {}s'.format(name, elapsed))
    return stats
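
A hedged usage sketch for add_timing above: time a block of work with time.perf_counter and push the elapsed seconds under a name. The module-level _timings dict and the timed workload here are assumptions made for this example, not part of the original module.

import logging
import time

_timings = {}  # assumed module-level registry used by add_timing

logging.basicConfig(level=logging.INFO)

for _ in range(5):
    start = time.perf_counter()
    sum(range(100000))  # stand-in for the work being timed
    stats = add_timing('demo_step', time.perf_counter() - start, no_print=False)

# the same Statistics object accumulates across calls with the same name
print(stats.mean(), stats.stddev())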
Example #16
 def __init__(self, metric_funcs, output_path, method):
     """
     Parameters
     ----------
     metric_funcs (dict): A dict where the keys are metric names and the values are Python functions for evaluating
     that metric.
     output_path: path to the output directory
     method: reconstruction method
     """
     self.metrics_scores = {metric: Statistics() for metric in metric_funcs}
     self.output_path = output_path
     self.method = method
Example #17
 def __init__(self):
     try:
         self.encoder = PKCS7Encoder()
         self.runningStat = Statistics()
         self.rAvg = 0.0
         self.rDev = 0.0
         if "AES_KEY" in os.environ:
             self.skey = os.environ["AES_KEY"]
         else:
             self.skey = os.urandom(16)
         self.iv = ''.join(
             [chr(random.randint(0, 0xFF)) for i in range(16)])
     except Exception as e:
         print "Failed to Initialize NsuCryptoServer:" + str(e)
Example #18
    def __init__(self, name, task_runner, max_workers=2):
        self.name = name
        self.reader = None
        self.writer = None

        self.message_prefix = f"{application_edge_marker}{self.name}{serial_sep}"

        self.stats = Statistics()
        self.executor = ProcessPoolExecutor(max_workers=max_workers)
        self._task_runner = task_runner

        self.ack_cond = asyncio.Condition()
        self.response_lock = asyncio.Lock()

        self.was_cancelled = False
Example #19
 def sstdComp(self, data, id):
     # reset the per-call outlier flags and scores
     if self.outl is None and self.score is None:
         self.outl = []
         self.score = []
     else:
         self.outl.clear()
         self.score.clear()
     # lazily create one running Statistics object per stream id
     if id not in self.stats:
         self.stats[id] = Statistics()
     for i in data[:]:
         self.stats[id].push(i)
     # once more than one sample has been seen, flag values above mean + k*stddev
     if self.stats[id].get_state()[0] > 1.0:
         sigma = self.stats[id].mean() + self.sigma*self.stats[id].stddev()
         for i in range(0, len(data[:])):
             if data[i] >= sigma:
                 self.outl.append(-1)  # outlier: at or above the threshold
                 self.score.append(abs(data[i] - self.stats[id].stddev()))
             else:
                 self.outl.append(1)   # inlier: below the threshold
                 self.score.append(abs(data[i] - self.stats[id].stddev()))
Example #20
    def plot_confidence_curve(self, save_dir):
        """ Creates a plot of confidence vs. each metric. """
        confidences = np.array(self.confidences)
        confidence_order = np.flip(confidences.argsort())
        for metric, values in self.metrics.items():
            stats = Statistics()
            cum_metrics = []
            for ex_idx in confidence_order:
                stats.push(values[ex_idx])
                cum_metrics.append(stats.mean())

            plt.plot(cum_metrics)
            plt.xlabel('Confidence Ranking')
            plt.ylabel(metric)
            save_pickle({
                'confidences': confidences,
                'metrics': cum_metrics
            }, os.path.join(save_dir, '{}.p'.format(metric)))
            plt.savefig(os.path.join(save_dir, '{}.png'.format(metric)))
            plt.clf()
Example #21
def get_summary_text(log_key:str, out_dir:str, node_path:str, epoch_stats:List[EpochStats], seed_runs:int)->str:
    lines = ['','']

    lines.append(f'## Run: {log_key}\n')
    lines.append(f'### Metric Type: {node_path}\n')

    lines.append(f'Number of epochs: {len(epoch_stats)}\n')
    lines.append(f'Number of seeds: {seed_runs}\n')

    lines.append('\n')
    plot_filename = get_valid_filename(log_key + ':' + node_path)+'.png'
    plot_filepath = os.path.join(out_dir, plot_filename)
    plot_epochs(epoch_stats, plot_filepath)
    lines.append('')

    train_duration = Statistics()
    for epoch_stat in epoch_stats:
        train_duration += epoch_stat.train_fold.duration

    lines.append(f'![]({plot_filename})')

    lines.append(f'Train epoch time: {stat2str(train_duration)}')
    lines.append('')
    milestones = [0, 5, 30, 100, 200, 600, 1500]
    for milestone in milestones:
        if len(epoch_stats) >= milestone and len(epoch_stats[milestone-1].val_fold.top1)>0:
            lines.append(f'{stat2str(epoch_stats[milestone-1].val_fold.top1)} val top1 @ {milestone} epochs\n')
    # last epoch
    if len(epoch_stats) not in milestones:
        # find last epoch with valid stats
        last_epoch = len(epoch_stats)-1
        while last_epoch>=0 and len(epoch_stats[last_epoch].val_fold.top1)==0:
            last_epoch -= 1
        if last_epoch >=0:
            lines.append(f'{stat2str(epoch_stats[last_epoch].val_fold.top1)} val top1 @ {len(epoch_stats)} epochs [Last]\n')
        else:
            lines.append(f'[Last] No epoch with valid val stats found!')

    return '\n'.join(lines)
Example #22
 def __init__(self,
              env: GoalEnv,
              skill_reset_steps: int = -1,
              skill_dim=None):
     super().__init__(env)
     self._env_is_ant = hasattr(env, "IS_ANT") and env.IS_ANT
     self._do_reset_skill = skill_reset_steps > -1
     self._skill_reset_steps = skill_reset_steps
     self._skill_dim = skill_dim or env.observation_space[
         "desired_goal"].shape[0]
     obs_dim = self.env.observation_space["observation"].shape[0]
     self.observation_space = gym.spaces.Box(-np.inf,
                                             np.inf,
                                             shape=(obs_dim +
                                                    self._skill_dim, ))
     self.strategy = MVNStrategy(skill_dim=self._skill_dim)
     self._cur_skill = self.strategy.sample_skill()
     self._last_dict_obs = None
     self._goal_deltas_stats = [
         Statistics([1e-6]) for _ in range(self._skill_dim)
     ]
     self._latest_goal_delta_stats = dict()
Example #23
def simulate(QUEUES, SERVERS):
    y = []

    for i in range(0, SAMPLES):

        env = simpy.Environment()

        # save stats
        env.stats = Statistics()

        # queues
        env.queues = [Servers(env, SERVERS, mu) for i in range(0, QUEUES)]

        # start the arrival process
        env.process(arrival(env, lambd))

        # simulate until SIM_TIME
        env.run(until=SIM_TIME)

        y.append(env.stats.mean())

    return y
Example #24
 def test_pushandrecalculate(self):
     rstat = Statistics()
     resp = self.app.get('/api/reset')
     self.assertEqual(resp.status_code, 200)
     test_array = [4, 7, 6, 9, 1]
     for i in test_array:
         resp = self.app.post('/api/pushandrecalculate', data=str(i))
     print(resp.get_data(as_text=True))
     avg = 0
     for i in test_array:
         avg = avg + i
     avg = float(avg) / float(len(test_array))
     self.assertEqual(
         resp.get_data(as_text=True).split("{")[1].split(",")[0],
         str(avg))
     for i in test_array:
         rstat.push(i)
     self.assertEqual(
         str(rstat.stddev(ddof=0)),
         resp.get_data(as_text=True).split("}")[0].split(",")[-1])
     print("--- Push and Recalculate Statistics Validation Successful ---")
Example #25
def get_summary_text(log_key: str, out_dir: str, node_path: str,
                     epoch_stats: List[EpochStats], seed_runs: int) -> str:
    lines = ['', '']

    lines.append(f'## Run: {log_key}\n')
    lines.append(f'### Metric Type: {node_path}\n')

    lines.append(f'Number of epochs: {len(epoch_stats)}\n')
    lines.append(f'Number of seeds: {seed_runs}\n')

    lines.append('\n')
    plot_filename = get_valid_filename(node_path) + '.png'
    plot_filepath = os.path.join(out_dir, plot_filename)
    plot_epochs(epoch_stats, plot_filepath)
    lines.append('')

    train_duration = Statistics()
    for epoch_stat in epoch_stats:
        train_duration += epoch_stat.train_fold.duration

    lines.append(f'![]({plot_filename})')

    lines.append(f'Train epoch time: {stat2str(train_duration)}')
    lines.append('')
    milestones = [35, 200, 600, 1500]
    for milestone in milestones:
        if len(epoch_stats) >= milestone:
            lines.append(
                f'{stat2str(epoch_stats[milestone-1].val_fold.top1)} val top1 @ {milestone} epochs\n'
            )
    # last epoch
    if len(epoch_stats) not in milestones:
        lines.append(
            f'{stat2str(epoch_stats[-1].val_fold.top1)} val top1 @ {len(epoch_stats)} epochs\n'
        )

    return '\n'.join(lines)
Example #26
 def __init__(self, metric_funcs):
     self.metrics = {metric: Statistics() for metric in metric_funcs}
Example #27
    # *********************************
    # setup and perform the simulation
    # *********************************

    # env = simpy.Environment()
    # stats = Statistics()

    # servers
    # env.servers = Servers(env, NUM_SERVERS, mu)  # service

    # start the arrival process
    response_time = []
    pn_ls = []
    W_ls = []
    for j in range(LAMBDA):
        env = simpy.Environment()
        stats = Statistics()
        env.servers = Servers(env, NUM_SERVERS, mu)  # service
        env.process(arrival(environment=env, arrival_rate=j + 1))  # customers
        # simulate until SIM_TIME
        env.run(until=SIM_TIME)
        response_time.append(stats.mean())
        W_ls.append(average_time_in_queue(lambd=j+1, mu=mu))
        pn_ls.append(state_distributionMM1(lambd=j+1, mu=mu, N=STATE))
        plt.figure()
        plt.title(f'M/M/{NUM_SERVERS} state distribution, LAMBDA={j+1}, mu={mu}')
        plt.plot(np.arange(1, STATE+1), pn_ls[j])
        plt.xlabel('state')
        plt.ylabel('p(n)')
        plt.grid()
        plt.show()
Example #28
 def __init__(self) -> None:
     self.top1 = Statistics()
     self.top5 = Statistics()
     self.duration = Statistics()
     self.step_time = Statistics()
Example #29
 def __init__(self) -> None:
     self.start_lr = Statistics()
     self.end_lr = Statistics()
     self.train_fold = FoldStats()
     self.val_fold = FoldStats()
Example #30
def run_extract_losses(args, cfg, save_dir, given_task):
    transfer = (cfg['model_type'] == architectures.TransferNet)
    if transfer:
        get_data_prefetch_threads_init_fn = utils.get_data_prefetch_threads_init_fn_transfer
        setup_input_fn = utils.setup_input_transfer
    else:
        setup_input_fn = utils.setup_input
        get_data_prefetch_threads_init_fn = utils.get_data_prefetch_threads_init_fn

    # set up logging
    tf.logging.set_verbosity(tf.logging.ERROR)
    stats = Statistics()
    print_every = int(args.print_every)

    with tf.Graph().as_default() as g:
        # create ops and placeholders
        inputs = setup_input_fn(cfg,
                                is_training=False,
                                use_filename_queue=False)
        #RuntimeDeterminedEnviromentVars.load_dynamic_variables( inputs, cfg )
        #RuntimeDeterminedEnviromentVars.populate_registered_variables()
        max_steps = get_max_steps(inputs['max_steps'], args.data_split)
        # pdb.set_trace()
        # build model (and losses and train_op)
        model = utils.setup_model(inputs, cfg, is_training=False)
        loss_names, loss_ops = get_extractable_losses(cfg, model)
        if 'l1_loss' in loss_names:
            display_loss = 'l1_loss'
        elif 'l2_loss' in loss_names:
            display_loss = 'l2_loss'
        elif 'xentropy' in loss_names:
            display_loss = 'xentropy'
        elif 'metric_loss' in loss_names:
            display_loss = 'metric_loss'
        elif 'cycle_loss' in loss_names:
            display_loss = 'cycle_loss'
        else:
            display_loss = 'total_loss'

        # set up metrics to evaluate
        names_to_values, names_to_updates = setup_metrics(inputs, model, cfg)

        # execute training
        start_time = time.time()
        utils.print_start_info(cfg, max_steps, is_training=False)

        # start session and restore model
        training_runners = {
            'sess': tf.Session(),
            'coord': tf.train.Coordinator()
        }
        try:
            if cfg['model_path'] is None:
                print('Please specify a checkpoint directory')
                return
            print('Attention, model_path is ', cfg['model_path'])
            model['saver_op'].restore(training_runners['sess'],
                                      cfg['model_path'])

            # var = [v for v in tf.global_variables() if 'decoder' in v.name][0]
            # print(training_runners[ 'sess' ].run(var))

            utils.print_start_info(cfg, max_steps, is_training=False)
            data_prefetch_init_fn = get_data_prefetch_threads_init_fn(
                inputs, cfg, is_training=False, use_filename_queue=False)
            prefetch_threads = threading.Thread(
                target=data_prefetch_init_fn,
                args=(training_runners['sess'], training_runners['coord']))
            prefetch_threads.start()

            # run one example so that we can calculate some statistics about the representations
            filenames = []
            loss_names_to_vals = {name: [] for name in loss_names}
            results = training_runners['sess'].run([
                inputs['data_idxs'], inputs['target_batch'],
                inputs['mask_batch'], *loss_ops
            ])
            #gs = results[1]
            data_idx = results[0]
            losses = results[3:]
            target_input = results[1]
            mask_input = results[2]
            for i, name in enumerate(loss_names):
                loss_names_to_vals[name].append(losses[i])
            filenames.extend(data_idx)
            print("Step number: {}".format(1), (data_idx))
            # print(target_input, target_input.sum())
            # return
            # training_runners['sess'].run([v for v in tf.global_variables() if "transfer/rep_conv_1/weights" in v.name][0])
            # run the remaining examples
            start = time.perf_counter()
            for step in range(max_steps - 1):
                results = training_runners['sess'].run([
                    inputs['data_idxs'],
                    # [v for v in tf.global_variables() if "transfer/rep_conv_1/weights/(weights)" in v.name][0],
                    # model['model'].encoder_endpoints['net1_1_output'],
                    # model['model'].encoder_endpoints['net1_2_output'],
                    *loss_ops
                ])
                data_idx = results[0]
                # print(data_idx)
                losses = results[1:]
                # p, t, m = results[1], results[2], results[3]
                # losses = results[4:]

                # print(p.mean(), t)
                for i, name in enumerate(loss_names):
                    loss_names_to_vals[name].append(losses[i])
                filenames.extend(data_idx)
                stats.push(loss_names_to_vals[display_loss][-1])

                # baseline_loss = get_xentropy_loss(p, t, m)
                # tf_loss = loss_names_to_vals[display_loss][-1]
                # print('tf {} | ours {}'.format(tf_loss, baseline_loss))
                # pdb.set_trace()

                if step % print_every == 0 and step > 0:
                    print(
                        'Step {0} of {1}: {5} loss: {2:.3f} || stddev: {3:.3f} ({4:.2f} secs/step)'
                        .format(
                            step,
                            max_steps - 1,
                            stats.mean(),
                            np.sqrt(stats.variance()),
                            # stats.variance(),
                            (time.perf_counter() - start) / print_every,
                            display_loss))
                    start = time.perf_counter()

                if training_runners['coord'].should_stop():
                    break

            print(
                'The size of losses is %s while we expect it to run for %d steps with batchsize %d'
                % (len(filenames), inputs['max_steps'], cfg['batch_size']))

            end_train_time = time.time() - start_time
            if args.out_name:
                out_name = args.out_name
            else:
                out_name = '{task}_{split}_losses.pkl'.format(
                    task=given_task, split=args.data_split)
            save_path = os.path.join(save_dir, out_name)

            with open(save_path, 'wb') as f:
                loss_names_to_vals['file_indexes'] = filenames
                loss_names_to_vals['global_step'] = 0
                pickle.dump(loss_names_to_vals, f)

            if args.out_dir:
                os.makedirs(args.out_dir, exist_ok=True)
                os.system("sudo cp {fp} {out}/".format(fp=save_path,
                                                       out=args.out_dir))
            else:
                if transfer:
                    copy_to = cfg['log_root']
                else:
                    copy_to = os.path.join(cfg['log_root'], given_task)
                os.system("sudo mv {fp} {dst}/".format(fp=save_path,
                                                       dst=copy_to))
                print("sudo mv {fp} {dst}/".format(fp=save_path, dst=copy_to))
                # if transfer:
                #     os.makedirs('/home/ubuntu/s3/model_log/losses_transfer/', exist_ok=True)
                #     os.system("sudo cp {fp} /home/ubuntu/s3/model_log/losses_transfer/".format(fp=save_path))
                # else:
                #     os.makedirs('/home/ubuntu/s3/model_log/losses/', exist_ok=True)
                #     os.system("sudo cp {fp} /home/ubuntu/s3/model_log/losses/".format(fp=save_path))

            print('saved losses to {0}'.format(save_path))
            print('time to extract %d epochs: %.3f hrs' %
                  (cfg['num_epochs'], end_train_time / (60 * 60)))
        finally:
            utils.request_data_loading_end(training_runners)
            utils.end_data_loading_and_sess(training_runners)