Example #1
def debug_updated_weights(opts, steps, weights, data):
    """ Various debug plots for updated weights of training points.

    """
    assert data.num_points == len(weights), 'Length mismatch'
    ws_and_ids = sorted(zip(weights, range(len(weights))))
    num_plot = 20 * 16
    if num_plot > len(weights):
        return
    ids = [_id for w, _id in ws_and_ids[:num_plot]]
    plot_points = data.data[ids]
    metrics = metrics_lib.Metrics()
    metrics.make_plots(opts, steps, None, plot_points, prefix='d_least_')
    ids = [_id for w, _id in ws_and_ids[-num_plot:]]
    plot_points = data.data[ids]
    metrics = metrics_lib.Metrics()
    metrics.make_plots(opts, steps, None, plot_points, prefix='d_most_')
    plt.clf()
    ax1 = plt.subplot(211)
    ax1.set_title('Weights over data points')
    plt.plot(range(len(weights)), sorted(weights))
    plt.axis([0, len(weights), 0., 2. * np.max(weights)])
    if data.labels is not None:
        all_labels = np.unique(data.labels)
        w_per_label = -1. * np.ones(len(all_labels))
        for _id, y in enumerate(all_labels):
            w_per_label[_id] = np.sum(weights[np.where(data.labels == y)[0]])
        ax2 = plt.subplot(212)
        ax2.set_title('Weights over labels')
        plt.scatter(range(len(all_labels)), w_per_label, s=30)
    filename = 'data_w{:02d}.png'.format(steps)
    create_dir(opts['work_dir'])
    plt.savefig(o_gfile((opts["work_dir"], filename), 'wb'))
Example #2
    def __init__(self, metric_names):
        self.model_name = P.MODEL_ID
        self.setup_folders()

        initialize_logger(
            os.path.join(self.model_folder, 'log.txt').format(self.model_name))
        P.write_to_file(os.path.join(self.model_folder, 'config.ini'))
        logging.info(P.to_string())

        self.train_metrics = metrics.Metrics('train', metric_names,
                                             P.N_CLASSES)
        self.val_metrics = metrics.Metrics('validation', metric_names,
                                           P.N_CLASSES)
        self.epoch = -1
Example #3
def run(args):
    appMetrics = metrics.Metrics(args)

    data = data_set.DcsData(args, appMetrics)
    data.run()

    appMetrics.write()
Example #4
def Classify(expdir):
  saver.restore(session, os.path.join(expdir, 'model.bin'))

  context_var = params.context_vars[-1]
  placeholder = model.context_placeholders[context_var]
  lang_vocab = context_vocabs[context_var]
  vocab_subset = [w for w in lang_vocab.GetWords() if w != '<UNK>']
  print vocab_subset

  print 'preparing dataset'
  dataset.Filter(vocab_subset, context_var)
  dataset.Prepare(vocab, context_vocabs)

  results = []
  all_labels = []
  all_preds = []
  for pos in xrange(dataset.GetNumBatches()):
    if pos % 10 == 0:
      print pos
    batch = dataset.GetNextBatch()
    feed_dict = GetFeedDict(batch, use_dropout=False)
    labels = np.array(feed_dict[placeholder])

    def GetCosts():
      costs = []
      if use_nce_loss:
        feed_dict[placeholder][:] = lang_vocab[vocab_subset[0]]
        result = session.run([model.per_sentence_loss] + model.sampled_values, feed_dict)
        sentence_costs, sampled_vals = result[0], result[1:]
        costs.append(sentence_costs)
        # reuse the sampled values
        for i in range(len(model.sampled_values)):
          feed_dict[model.sampled_values[i][0]] = sampled_vals[i][0]
          feed_dict[model.sampled_values[i][1]] = sampled_vals[i][1]
          feed_dict[model.sampled_values[i][2]] = sampled_vals[i][2]

        for i in range(1, len(vocab_subset)):
          feed_dict[placeholder][:] = lang_vocab[vocab_subset[i]]
          costs.append(session.run(model.per_sentence_loss, feed_dict))
      else:  # full softmax
        for i in range(len(vocab_subset)):
          feed_dict[placeholder][:] = lang_vocab[vocab_subset[i]]
          costs.append(session.run(model.per_sentence_loss, feed_dict))
        
      return np.array(costs)

    costs = GetCosts()
    for label, c_array in zip(labels, costs.T):
      d = dict(zip(vocab_subset, c_array))
      d['label'] = lang_vocab[label]
      results.append(d)

    predictions = np.argmin(costs, 0)
    all_preds += [lang_vocab[vocab_subset[x]] for x in predictions]
    all_labels += list(labels)
  
  df = pandas.DataFrame(results)
  df.to_csv(os.path.join(expdir, 'classify.csv'))
  metrics.Metrics([lang_vocab[i] for i in all_preds],
                  [lang_vocab[i] for i in all_labels])
Example #5
def UnigramClassify(expdir):
  # turn the model into a linear classifier by using the softmax bias
  saver.restore(session, os.path.join(expdir, 'model.bin'))
  probs = tf.nn.softmax(model.base_bias + 
                        tf.transpose(model.bias_tables['subreddit']))
  log_probs = tf.log(probs).eval(session=session)
  
  lang_vocab = context_vocabs['subreddit']
  vocab_subset = lang_vocab.GetWords()

  print 'preparing dataset'
  dataset.Prepare(vocab, context_vocabs)

  preds = []
  labels = []
  for pos in xrange(dataset.GetNumBatches()):
    if pos % 10 == 0:
      print pos
    batch = dataset.GetNextBatch()
    
    for i in xrange(len(batch)):
      row = batch.iloc[i]
      scores = np.zeros(len(vocab_subset))

      for word_id in row.text[1:row.seq_lens]:
        scores += log_probs[:, word_id]
      preds.append(np.argmax(scores))
      labels.append(row['subreddit'])
  metrics.Metrics([lang_vocab[i] for i in preds],
                  [lang_vocab[i] for i in labels])
Example #6
	def output(self):
		if not self.localize_output:
			localization.disable()

		terminal.skip_escapes(not sys.stdout.isatty())
		terminal.set_stdout_encoding()
		previous_directory = os.getcwd()

		os.chdir(self.repo)
		absolute_path = basedir.get_basedir_git()
		os.chdir(absolute_path)
		format.output_header()
		outputable.output(changes.ChangesOutput(self.hard))

		if changes.get(self.hard).get_commits():
			outputable.output(blame.BlameOutput(self.hard, self.useweeks))

			if self.timeline:
				outputable.output(timeline.Timeline(changes.get(self.hard), self.useweeks))

			if self.include_metrics:
				outputable.output(metrics.Metrics())

			if self.responsibilities:
				outputable.output(responsibilities.ResponsibilitiesOutput(self.hard, self.useweeks))

			outputable.output(filtering.Filtering())

			if self.list_file_types:
				outputable.output(extensions.Extensions())

		format.output_footer()
		os.chdir(previous_directory)
Example #7
def main(argv):
    """Main body of the program."""
    parser = argparse.ArgumentParser(prog=argv[0])
    parser.add_argument('--port',
                        help='HTTP server port',
                        type=int,
                        default=8091)
    parser.add_argument('--config',
                        help='Configuration file (INI file)',
                        default='config.ini')
    parser.add_argument('--log_level',
                        help='Logging level (DEBUG, INFO, WARNING, ERROR)',
                        type=str,
                        default='INFO')
    parser.add_argument(
        '--include_inactive_devices',
        help=
        'Do not use; this flag has no effect and remains for compatibility only',
        action='store_true')
    args = parser.parse_args()

    try:
        level = getattr(logging, args.log_level)
    except AttributeError:
        print(f'Invalid --log_level: {args.log_level}')
        sys.exit(-1)

    logging.basicConfig(
        format=
        '%(asctime)s [%(name)24s %(thread)d] %(levelname)10s %(message)s',
        datefmt='%Y/%m/%d %H:%M:%S',
        level=level)

    logger.info('Starting up on port=%s', args.port)

    if args.include_inactive_devices:
        logger.warning(
            '--include_inactive_devices is now inoperative and will be removed in a future release'
        )

    try:
        cfg = config.Config(args.config)
    except:
        logger.exception('Could not load configuration: %s', args.config)
        sys.exit(-1)

    devices = cfg.devices
    if len(devices) == 0:
        logger.fatal(
            'No devices configured; please re-run this program with --create_device_cache.'
        )
        sys.exit(-2)

    prometheus_client.start_http_server(args.port)

    connect.ConnectionManager(metrics.Metrics().update, devices, cfg.hosts)

    _sleep_forever()
Example #8
    def __init__(self):
        # Environment initialization
        self.global_arg = arg.init_global_arg()
        env_arg = arg.init_env_arg(self.global_arg)
        # Added a load step for the nk model
        self.main_env = Env(env_arg)
        for model_type in ['st', 'ed']:
            if all_config['checkpoint']['env'][model_type]['enable']:
                self.main_env.nkmodel_load(all_config['checkpoint']['env']['path'], model_type)
            self.main_env.nkmodel_save(all_config["nkmodel_path"][model_type], model_type)
        # Agent initialization
        self.agents = []
        csv_head_agent = ['agent_no'] + ['st_state'] + ['st_value'] + ['insight'] + ['xplr'] + ['xplt'] + ['enable']
        moniter.AppendToCsv(csv_head_agent, all_config['agent_csv_path'])
        for i in range(self.global_arg["Nagent"]):
            # Random initial position for each agent
            start_st_label = [randint(0, self.main_env.P - 1) for j in range(self.main_env.N)]
            state_start = State(start_st_label)
            self.agents.append(Agent(arg.init_agent_arg(self.global_arg,
                                                        self.main_env.arg),
                                     self.main_env))
            self.agents[i].state_now = deepcopy(state_start)
            self.agents[i].agent_id = i

            # Instead of starting with a global area, add a single-point area containing the start state
            start_area = Area(self.agents[i].state_now, [False] * self.main_env.N, 0)
            start_area.info = get_area_sample_distr(env=self.main_env, area=start_area, state=self.agents[i].state_now,
                                                    T_stmp=0, sample_num=1, dfs_r=1)
            start_area.sign = Sign(i, 0, 'start')
            self.agents[i].renew_m_info(start_area, 0)
            self.a_plan = None
            logging.info("state:%s, st_value:%s,insight:%.5s ,xplr:%.5s, xplt:%.5s, enable:%.5s" % (
                str(self.agents[i].state_now),
                self.main_env.getValue(self.agents[i].state_now, 0),
                self.agents[i].agent_arg['a']['insight'],
                self.agents[i].agent_arg['a']['xplr'],
                self.agents[i].agent_arg['a']['xplt'],
                self.agents[i].agent_arg['a']['enable']))
            # Record agent information
            csv_info_agent = ['agent%d' % i] \
                             + [self.agents[i].state_now] \
                             + [self.main_env.getValue(self.agents[i].state_now, 0)] \
                             + [self.agents[i].agent_arg['a']['insight']] \
                             + [self.agents[i].agent_arg['a']['xplr']] \
                             + [self.agents[i].agent_arg['a']['xplt']] \
                             + [self.agents[i].agent_arg['a']['enable']]
            moniter.AppendToCsv(csv_info_agent, all_config['agent_csv_path'])

        # Social network initialization
        soclnet_arg = arg.init_soclnet_arg(self.global_arg, env_arg)
        self.socl_net = SoclNet(soclnet_arg)
        self.socl_net.new_flat_init()  # Updated initialization method
        # self.socl_net.flat_init()
        if all_config['checkpoint']['socl_network']['enable']:
            self.socl_net.power_load(all_config['checkpoint']['socl_network']['power'])
            self.socl_net.relat_load(all_config['checkpoint']['socl_network']['relat'])
        self.record = Record()

        self.metric = metrics.register_all_metrics(metrics.Metrics())
Example #9
    def output(self):
        if not self.localize_output:
            localization.disable()

        terminal.skip_escapes(not sys.stdout.isatty())
        terminal.set_stdout_encoding()
        previous_directory = os.getcwd()

        os.chdir(self.repo)
        isbare = subprocess.Popen("git rev-parse --is-bare-repository",
                                  shell=True,
                                  bufsize=1,
                                  stdout=subprocess.PIPE).stdout
        isbare = isbare.readlines()
        self.isbare = (isbare[0].decode("utf-8", "replace").strip() == "true")
        absolute_path = ""

        if self.isbare:
            absolute_path = subprocess.Popen("git rev-parse --git-dir",
                                             shell=True,
                                             bufsize=1,
                                             stdout=subprocess.PIPE).stdout
        else:
            absolute_path = subprocess.Popen("git rev-parse --show-toplevel",
                                             shell=True,
                                             bufsize=1,
                                             stdout=subprocess.PIPE).stdout

        absolute_path = absolute_path.readlines()
        if len(absolute_path) == 0:
            sys.exit(_("Unable to determine absolute path of git repository."))

        os.chdir(absolute_path[0].decode("utf-8", "replace").strip())
        format.output_header()
        outputable.output(changes.ChangesOutput(self.hard))

        if changes.get(self.hard).get_commits():
            outputable.output(blame.BlameOutput(self.hard))

            if self.timeline:
                outputable.output(
                    timeline.Timeline(changes.get(self.hard), self.useweeks))

            if self.include_metrics:
                outputable.output(metrics.Metrics())

            if self.responsibilities:
                outputable.output(
                    responsibilities.ResponsibilitiesOutput(self.hard))

            outputable.output(filtering.Filtering())

            if self.list_file_types:
                outputable.output(extensions.Extensions())

        format.output_footer()
        os.chdir(previous_directory)
示例#10
0
    def collect_metrics(self):
        """Sample system metrics and populate a metrics object suitable for publishing to Device Defender."""
        metrics_current = metrics.Metrics(last_metric=self._last_metric)

        self.network_stats(metrics_current)
        self.listening_ports(metrics_current)
        self.network_connections(metrics_current)

        self._last_metric = metrics_current
        return metrics_current
示例#11
0
def init_matchers(nlp_model, database_df, destination_urls):
    d = dict()
    mtr = m.Metrics('matcher')
    for url in destination_urls:
        mtr.start()
        phrase_list = list(database_df['Lemma'][database_df['URL'] == url])
        phrase_patterns = [nlp_model(text) for text in phrase_list]
        matcher = PhraseMatcher(nlp_model.vocab, attr="LEMMA")
        matcher.add(url, None, *phrase_patterns)
        d[url] = matcher
        mtr.stop()
    mtr.report()
    return d
示例#12
0
def GeoClassify(expdir):
  # classify tweets based on lat/long context
  saver.restore(session, os.path.join(expdir, 'model.bin'))

  print 'preparing dataset'
  dataset.Prepare(vocab, context_vocabs)

  classes = [
    {'lat': 40.4, 'lon': -3.7},  #madrid
    {'lat': 51.5, 'lon': -0.23},  # london
    {'lat': 40.7, 'lon': -74.0},  # nyc
    {'lat': 41.4, 'lon': 2.17},  # barcelona
    {'lat': 34.05, 'lon': -118.2},  # los angeles
    {'lat': 53.5, 'lon': -2.2}  # manchester
  ]
  names = ['madrid', 'london', 'nyc', 'barcelona', 'la', 'manchester']

  results = []
  all_labels = []
  all_preds = []
  for pos in xrange(dataset.GetNumBatches()):
    if pos % 10 == 0:
      print pos
    batch = dataset.GetNextBatch()
    feed_dict = GetFeedDict(batch, use_dropout=False)
    labels = []
    for lat, lon in zip(feed_dict[model.context_placeholders['lat']],
                        feed_dict[model.context_placeholders['lon']]):
      closest_dist = 300
      closest_class = -1
      for i in range(len(names)):
        d = helper.haversine(lon, lat, classes[i]['lon'], classes[i]['lat'])
        if d < closest_dist:
          closest_dist = d
          closest_class = names[i]
      labels.append(closest_class)
    labels = np.array(labels)

    costs = []
    for i in range(len(names)):
      feed_dict[model.context_placeholders['lat']][:] = classes[i]['lat']
      feed_dict[model.context_placeholders['lon']][:] = classes[i]['lon']
      costs.append(session.run(model.per_sentence_loss, feed_dict))
    costs = np.array(costs)

    predictions = np.argmin(costs, 0)
    for p, l in zip(np.squeeze(predictions), labels):
      if l != '-1':
        all_preds.append(p)
        all_labels.append(l)
  metrics.Metrics([names[i] for i in all_preds], all_labels)
示例#13
0
def train_and_measure(data, labels, k):
    model = KMeans(k)
    model.fit(data, max_iter=50, visualize=False)
    predictions = model.predict(data)
    metric = mtr.Metrics()

    wc = metric.wc_cluster_distances(data, predictions,
                                     model.cluster_centroids)
    start = time.time()
    sc = metric.silhouette_coefficient(data, predictions)
    # print("seconds %s" % (time.time() - start))
    nmi = metric.nmi_gain(data, labels, predictions)

    return model, wc, sc, nmi
示例#14
0
    def __init__(self, topic):
        self.consumer = KafkaConsumer(
            topic,
            bootstrap_servers=['localhost:9092'],
            auto_offset_reset='earliest',
            enable_auto_commit=True,
            auto_commit_interval_ms=5000,
            group_id=None,
            value_deserializer=lambda x: loads(x.decode('utf-8')))
        self.sentiment_analyzer = Sentiment()
        self.metrics = metrics.Metrics(consumer=self.consumer)

        size = 100
        self.x_vec = np.linspace(0, 1, size + 1)[0:-1]
        self.y_vec = np.zeros(len(self.x_vec))
        self.line1 = []
示例#15
0
    def run(self, timeseries):
        print('Neptune insights starting...')
        print('Listening for http requests on port 5000...')
        self.webserver = WebServer(self)
        self.webserver.start(5000)

        print('Connecting to timeseries at: ' + timeseries + '...')
        self.influx = influxdb_client.InfluxDBClient(
            url=timeseries,
            token=
            'Ku-vr2Vu70U47XRsUhNBRB2LoCkoSAQNEEzFc8Mncw72MLvQwaQf6ct0QERwzbN7Mhy8F16apCkkR5Obg0zhaw==',
            org='neptune')
        self.metrics = metrics.Metrics(self.influx)

        print('Neptune insights is started.')
        return self
示例#16
0
    def __init__(self,
                 model,
                 scaler,
                 flags,
                 save_path,
                 metrics=metrics.Metrics()):
        self.model_wrapper = ModelWrapper(model)
        self.model_checkpoint_path = flags.save_dir
        self.scaler = scaler
        self.lr = flags.learning_rate
        self.write_summary = flags.write_summary
        self.encoder_dim = flags.encoder_dim
        self.decoder_dim = flags.decoder_dim
        self.batch_size = flags.batch_size
        self.metrics = metrics
        self.save_path = save_path
        logging.get_absl_logger().addHandler(logging_base.StreamHandler())
        return
Example #17
def debug_mixture_classifier(opts,
                             step,
                             probs,
                             points,
                             num_plot=320,
                             real=True):
    """Small debugger for the mixture classifier's output.

    """
    num = len(points)
    if len(probs) != num:
        return
    if num < 2 * num_plot:
        return
    sorted_vals_and_ids = sorted(zip(probs, range(num)))
    if real:
        correct = sorted_vals_and_ids[-num_plot:]
        wrong = sorted_vals_and_ids[:num_plot]
    else:
        correct = sorted_vals_and_ids[:num_plot]
        wrong = sorted_vals_and_ids[-num_plot:]
    correct_ids = [_id for val, _id in correct]
    wrong_ids = [_id for val, _id in wrong]
    idstring = 'real' if real else 'fake'
    logging.debug('Correctly classified %s points probs:' %\
                  idstring)
    logging.debug([val[0] for val, _id in correct])
    logging.debug('Incorrectly classified %s points probs:' %\
                  idstring)
    logging.debug([val[0] for val, _id in wrong])
    metrics = metrics_lib.Metrics()
    metrics.make_plots(opts,
                       step,
                       None,
                       points[correct_ids],
                       prefix='c_%s_correct_' % idstring)
    metrics.make_plots(opts,
                       step,
                       None,
                       points[wrong_ids],
                       prefix='c_%s_wrong_' % idstring)
Example #18
def T1(ds1_path, ds2_path, year, k):
    year_graph = gr.YearGraph(ds1_path, ds2_path, year)
    year_graph.read()
    year_graph.weights_fraction_cocitations()
    year_graph.build_ds1_graph()

    metrics = me.Metrics(year_graph)
    metrics.metrics_preprocessing()
    metrics.pagerank(k)

    models = sp.SpreadOfInfluenceModels(year_graph, metrics.get_top_k_nodes())
    models.spread_preprocessing()
    models.threshold_deg()
    models.tipping_model()

    ov_top = ov.OverlapTopics(year_graph, models.get_influenced_nodes(), k)
    ov_top.overlap_preprocessing()
    ov_top.clique_percolation_method()
    ov_top.check_number_topics()
    ov_top.check_spread_topics()

    return ov_top.get_topics()
Example #19
    def __init__(self,
                 parameter=params.Params(),
                 network_name="CVANet_{0}".format(int(time.time())),
                 root_dir='experiments',
                 experiment_series='',
                 pretrained_network=''):
        """ Default initialisation function.

        @param parameter: A parameter object containing all relevant training parameters.
        @param network_name: The name of the network to be trained.
        @param root_dir: Root directory used for every output produced during training.
        @param experiment_series: Path relative to the root dir used for every output produced during training.
        @param pretrained_network: Path to a file containing pretrained model weights.
        """
        super(Train, self).__init__(parameter=parameter,
                                    network_name=network_name,
                                    root_dir=root_dir,
                                    experiment_series=experiment_series,
                                    pretrained_network=pretrained_network)

        # ---------------------------
        # Initialise optimiser and metrics
        # ---------------------------
        self.metrics_object = metrics.Metrics(
            basic_loss=parameter.loss_type,
            pos_class_weight=parameter.pos_class_weight,
            gmm_loss_weight=parameter.gmm_loss_weight,
            geometry_loss_weight=parameter.geometry_loss_weight,
            using_weighted_loss=parameter.using_weighted_loss,
            weighting_loss_func=parameter.weighting_loss_func)
        self.optimizer = tf.keras.optimizers.Adam(lr=parameter.learning_rate)

        if parameter.task_type == 'Classification':
            self.metric_names = ['Loss', 'Accuracy']
            metric_formats = ['{:.3f}', '{:.3f}']
        else:
            self.metric_names = ['Loss']
            metric_formats = ['{:.3f}']
        self.metric_tracking.add_metrics(self.metric_names, metric_formats)
Example #20
def main():
    #get user options/configurations
    opt = opts.get_opt()

    #set cuda visible devices if user specified a value
    if opt.gpus is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus

    #load class to save metrics, images and models to disk
    output = outputs.Outputs(opt)

    #load class to store metrics and losses values
    metric = metrics.Metrics()

    #load synthetic dataset
    healthy_dataloader_train, anomaly_dataloader_train = init_synth_dataloader_original(
        opt.folder_toy_dataset, opt.batch_size, mode='train')
    healthy_dataloader_val, anomaly_dataloader_val = init_synth_dataloader_original(
        opt.folder_toy_dataset, opt.batch_size, mode='val')

    net_g, net_r = init_model(opt)

    optim_g, optim_r = init_optimizer(opt, net_g=net_g, net_r=net_r)

    net_g = net_g.cuda()
    net_r = net_r.cuda()

    train(opt,
          healthy_dataloader_train,
          anomaly_dataloader_train,
          healthy_dataloader_val,
          anomaly_dataloader_val,
          net_g=net_g,
          net_r=net_r,
          optim_g=optim_g,
          optim_r=optim_r,
          output=output,
          metric=metric)
Example #21
    def __init__(self,
                 config,
                 agg_type,
                 debug=False,
                 verbose=False,
                 profile=None,
                 ignore_nosec=False):
        '''Get logger, config, AST handler, and result store ready

        :param config: config options object
        :type config: bandit.core.BanditConfig
        :param agg_type: aggregation type
        :param debug: Whether to show debug messages or not
        :param verbose: Whether to show verbose output
        :param profile: Optional profile to use (from cmd line)
        :param ignore_nosec: Whether to ignore #nosec or not
        :return:
        '''
        self.debug = debug
        self.verbose = verbose
        if not profile:
            profile = {}
        self.ignore_nosec = ignore_nosec
        self.b_conf = config
        self.files_list = []
        self.excluded_files = []
        self.b_ma = b_meta_ast.BanditMetaAst()
        self.skipped = []
        self.results = []
        self.baseline = []
        self.agg_type = agg_type
        self.metrics = metrics.Metrics()
        self.b_ts = b_test_set.BanditTestSet(config, profile)

        # set the increment of after how many files to show progress
        self.progress = b_constants.progress_increment
        self.scores = []
Example #22
def get_current_network():
    network_timestamp = request.args.get('timestamp')
    if(network_timestamp is None):
        curr_object = metrics.Metrics()
        return jsonify(curr_object.get_network())
Example #23
def main():
    parser = argparse.ArgumentParser(
        description='Run simulation on JSON file.')

    parser.add_argument('--json',
                        '-j',
                        action='store',
                        dest='json_file_name',
                        help='Use network stored in json file',
                        required=True)

    #option for tcp reno or tcp fast
    tcp_type = parser.add_mutually_exclusive_group(required=True)
    tcp_type.add_argument('--Reno',
                          dest='tcp_type',
                          action='store_const',
                          const='Reno',
                          help='Use the TCP-Reno congestion control algorithm')

    tcp_type.add_argument("--FAST",
                          dest='tcp_type',
                          action='store_const',
                          const='FAST',
                          help='Use the TCP-FAST congestion control algorithm')

    # options for graphing metrics
    metrics = parser.add_argument_group()
    metrics.add_argument('-m',
                         dest='metrics',
                         action='store_true',
                         help='Print graphs for metrics.\
                    Requires the following subarguments:')

    metricType = metrics.add_mutually_exclusive_group()

    metricType.add_argument('--more',
                            dest='log',
                            action='store_const',
                            const='more',
                            help='Prints a timetrace from collecting\
            all data.\
            Requires the -m argument.')

    metricType.add_argument('--less',
                            dest='log',
                            action='store_const',
                            const='less',
                            help='Prints a timetrace from collecting\
            a single datum per discrete time interval. \
            Subargument for the -m argument.')

    metricType.add_argument('--avg',
                            dest='log',
                            action='store_const',
                            const='avg',
                            help='Prints an approximate (average) timetrace\
            by collecting data over a discrete time interval. \
            Subargument for the -m argument.')

    metrics.add_argument('-l',
                         '--links',
                         nargs='+',
                         type=str,
                         action='store',
                         dest='links',
                         metavar='LinkID',
                         help='Specify which\
            links are to be logged. LinkID must given in the form\
            \'L1\', \'L2\', etc. Subargument for the -m argument.')

    metrics.add_argument('-f',
                         '--flows',
                         nargs='+',
                         type=str,
                         action='store',
                         dest='flows',
                         metavar='FlowID',
                         help='Specify which\
            flows are to be logged. FlowID must given in the form\
            \'F1\', \'F2\', etc. Subargument for the -m argument.')

    parser.add_argument('-v',
                        action='store_true',
                        dest='verbose',
                        help='verbose: prints out information about events,\
            event time, and number of elements in priority queue')

    args = parser.parse_args()
    # All subargs must be present if --m is invoked
    if not args.metrics and (args.log is not None or args.links is not None
                             or args.flows is not None):
        parser.print_usage()
        print "Error: -m argument is required."
        return
    # All subargs must be present if --m is invoked
    elif args.metrics and (args.log is None or args.links is None
                           or args.flows is None):
        parser.print_usage()
        print "Error: All of --m's subargments required."
        return

    f = open(args.json_file_name)

    parsed_data = json.loads(f.read())
    if args.verbose:
        print "JSON DATA:"
        pprint.pprint(parsed_data)

    devices = {}
    links = {}
    flows = {}

    print "\n\n"

    # Parse json data into data structures
    print "Iterating over hosts:"
    for host_name in parsed_data['hosts']:
        print "Host ", host_name, "has data: ", parsed_data['hosts'][host_name]
        host = classes.Host(str(host_name))
        devices[str(host_name)] = host

    print "Iterating over routers:"
    for router_name in parsed_data['routers']:
        print "Router ", router_name, "has data: ", parsed_data['routers'][
            router_name]
        router = classes.Router(str(router_name))
        devices[str(router_name)] = router
    print "Hosts and routers instantiated. ", "\n\n"

    print "Iterating over links and adding to hosts/routers:"
    for link_name in parsed_data['links']:
        link_data = parsed_data['links'][link_name]
        print "Link ", link_name, "has data: ", link_data

        link = classes.Link(str(link_name), link_data['link_rate'],
                            link_data['link_delay'], link_data['link_buffer'],
                            devices[link_data['devices'][0]],
                            devices[link_data['devices'][1]])
        links[str(link_name)] = link
    print "Links instantiated.", "\n\n"

    print "Iterating over flows:"
    for flow_name in parsed_data['flows']:
        flow_data = parsed_data['flows'][flow_name]
        print "Flow ", flow_name, "has data: ", flow_data

        flow = classes.Flow(str(flow_name), devices[flow_data['flow_src']],
                            devices[flow_data['flow_dest']],
                            flow_data['data_amt'], flow_data['flow_start'],
                            flow_data['theoRTT'])
        flows[str(flow_name)] = flow
    print "Flows instantiated.", "\n\n"

    # Verifying metric inputs from command line are correct
    if args.metrics:
        for flowID in args.flows:
            if flowID not in flows.keys():
                print "Bad flowID in argument list."
                return
        for linkID in args.links:
            if linkID not in links.keys():
                print "Bad linkID in argument list."
                return

    network = classes.Network(devices, links, flows)
    met = None
    if args.metrics:
        met = m.Metrics(args.log, args.flows, args.links)
    simulator = simulation.Simulator(network, args.tcp_type, met)

    # Generate initial routing table
    print "Running..."
    if args.verbose:
        print "Static routing:"

    simulator.staticRouting()
    while not simulator.q.empty():
        result = simulator.processEvent()
        if args.verbose:
            print "processing one event\n" + str(result)

    if args.verbose:
        print "------------NETWORK------------"
        print "----------DEVICE DETAILS----------"
        for device_name in devices:
            print devices[device_name]

        print "----------LINK DETAILS----------"
        for link_name in links:
            print links[link_name]

        print "----------FLOW DETAILS----------"
        for flow_name in flows:
            print flows[flow_name]

        print "----------STARTING SIMULATION------------"

    # Flows begin:
    for flow_name in flows:
        flow = flows[flow_name]

        counter = 0
        timer = flow.flow_start

        newGenEvent = simulation.Event(None, None, "INITIALIZEFLOW", timer,
                                       flow)
        simulator.insertEvent(newGenEvent)

    newDynamicRoutingEvent = simulation.Event(None, None, "REROUT",
                                              constants.REROUT_TIME, None)
    simulator.insertEvent(newDynamicRoutingEvent)

    while not simulator.q.empty():
        result = simulator.processEvent()
        if args.verbose:
            print "QUEUE SIZE: " + str(
                simulator.q.qsize()) + "\n" + str(result)

    for flow_name in flows:
        flow = flows[flow_name]
        print "DATA ACKNOWLEDGED: " + str(flow.data_acknowledged)
        print "DATA MADE: " + str(flow.data_amt)

    print "Simulation for ", args.json_file_name[:
                                                 -4], args.tcp_type, args.log, " done!"
    simulator.done()
Example #24
    def __init__(self, iterations, repetitions, time_limit=None):
        self.iterations = iterations
        self.repetitions = repetitions
        self.time_limit = time_limit
        self.m_metrics = metrics.Metrics()
        self.mcts_instance = mcts.Mcts(self.m_metrics)
Example #25
    print("Initializing matchers")
    matchers = init_matchers(nlp_model, database_df, destination_url_list)
    print("Matchers initialized")

    target = partial(get_match_df_from_single_url, nlp_model, matchers,
                     destination_url_list, input_class)

    proc_num = mp.cpu_count() - 1
    with mp.Pool(proc_num) as p:
        results = p.map(target, source_url_list)

    return results


if __name__ == '__main__':
    mo = m.Metrics('overall')
    mo.start()
    lang_model = load_nlp_model("pl_core_news_sm")
    url_list = load_url_list("Input/url_list3.txt")
    df_phrases = prepare_input_phrases_with_lemmas(lang_model,
                                                   "Input/Morele_ahrefs.xlsx")

    df_results = create_inlinks_report(lang_model,
                                       url_list,
                                       df_phrases,
                                       input_class='single-news-container')
    df = pd.concat(df_results)
    df.reset_index(drop=True, inplace=True)
    write_df_to_excel("Output/raporcik.xlsx", df)

    mo.stop()
Example #26
File: schedule.py Project: gargi/GSD
def hour_job():
    print('Hourly performance metrics are stored')
    metrics.Metrics().set_day_aggregate()
    metrics.Metrics().get_hour_metrics()
Example #27
File: schedule.py Project: gargi/GSD
def minute_job():
    print('Performance metrics are stored')
    metrics.Metrics().get_minute_metrics()
Example #28
def get_current_memory():
    memory_timestamp = request.args.get('timestamp')
    if(memory_timestamp is None):
        curr_object = metrics.Metrics()
        return jsonify(curr_object.get_memory())
Example #29
            "Must to specify the save flag with the output path: -s.")

    if results.out:
        output_sample_path = results.out

    #Save file flag
    if results.s:
        save_logs = True

    #Building graph, with weights calculation strategy or not
    year_graph = gr.YearGraph(ds1_path, ds2_path, y)
    year_graph.read()
    year_graph.weights_fraction_cocitations()
    year_graph.build_ds1_graph()

    metrics = me.Metrics(year_graph)
    metrics.metrics_preprocessing()
    metrics.pagerank(k)

    models = sp.SpreadOfInfluenceModels(year_graph, metrics.get_top_k_nodes())
    models.spread_preprocessing()

    if t == 0:
        models.threshold_deg()
        threshold = "degree"

    elif t == 1:
        models.threshold_half()
        threshold = "half"

    else:
Example #30
def get_current_disk():
    disk_timestamp = request.args.get('timestamp')
    if(disk_timestamp is None):
        curr_object = metrics.Metrics()
        return jsonify(curr_object.get_disk_usage())