def select_metric_value(m):
    """Interactive selection of a metric value
    Input:
        m : list of values that can be unpacked into valid parameters for constructing a Metric
    Return:
        a valid index for the Metric
    """
    m = Metric(*m)
    default_metric_value = m.index
    print("\n{0} {1} {2} {0}".format(10 * "+", m.name, m.short_name))
    while True:
        for v in m.values:
            print(v, v.description)
        idx = input('Select one [{0}]: '.format(default_metric_value)).upper()
        if not idx:
            idx = default_metric_value
        print('Selected metric value ###|', idx, '|###')
        try:
            m.index = idx
        except AssertionError:
            print('Not valid')
        else:
            return m.index
def get_metrics_H1(datadir):
    files = ['quality_WC_Unigram.label',
             'quality_WC_Unigam_NonZero.label',
             'quality_WC_Unigam_Content.label',
             'quality_WC_Unigam_OrgAssign.label',
             'quality_WC_Unigam_Speciteller.label',
             'quality_WC_Unigam_Title.label',
             'quality_DT.label',
             'quality_New.label',
             #'quality_New-Title.label',
             'quality_firstnode.label'
             ]

    metric = Metric()

    body = []
    for file in files:
        labels, predicts, _ = load_label(datadir + file)

        row = [file]
        row.append(metric.accuracy(labels, predicts))
        row.append(metric.kappa(labels, predicts))
        row.append(metric.QWkappa(labels, predicts))

        body.append(row)

    output = datadir + 'H1.txt'
    print output
    fio.WriteMatrix(output, body, header=None)
def get_metrics_H2b(datadir):
    metric = Metric()

    lectures = range(1, 9)

    feature = 'quality_CrossTopic_Rubric_firstnode_'
    feature_fixed = 'quality_CrossTopic_Rubric_firstnode_fixed_'
    for fold in lectures:
        input = datadir + feature + str(fold) + '_test.label'
        zero_file = datadir + feature + str(fold) + '_test_0.txt'
        output = datadir + feature_fixed + str(fold) + '_test.label'

        fix_firstnode(input, zero_file, output)

    body = []
    for feature in ['quality_CrossTopic_WC_Unigram_',
                    'quality_CrossTopic_Rubric_',
                    'quality_CrossTopic_Rubric_firstnode_fixed_',
                    'quality_CrossTopic_DT_',
                    ]:
        for lecture in lectures:
            file = feature + str(lecture) + '_test.label'
            print file

            labels, predicts, _ = load_label(datadir + file)

            row = [file]
            row.append(metric.accuracy(labels, predicts))
            row.append(metric.kappa(labels, predicts))
            row.append(metric.QWkappa(labels, predicts))

            body.append(row)

    output = datadir + 'H2b.txt'
    print output
    fio.WriteMatrix(output, body, header=None)
def get_metrics_H2c(datadir):
    metric = Metric()

    feature = 'quality_CrossCourse_Rubric_firstnode'
    feature_fixed = 'quality_CrossCourse_Rubric_firstnode_fixed'

    input = datadir + feature + '_test.label'
    zero_file = datadir + feature + '_test_0.txt'
    output = datadir + feature_fixed + '_test.label'

    fix_firstnode(input, zero_file, output)

    body = []
    for feature in ['quality_CrossCourse_WC_Unigram',
                    'quality_CrossCourse_Rubric',
                    'quality_CrossCourse_Rubric_firstnode_fixed',
                    'quality_CrossCourse_DT',
                    ]:
        file = feature + '_test.label'
        print file

        labels, predicts, _ = load_label(datadir + file)

        row = [file]
        row.append(metric.accuracy(labels, predicts))
        row.append(metric.kappa(labels, predicts))
        row.append(metric.QWkappa(labels, predicts))

        body.append(row)

    output = datadir + 'H2c.txt'
    print output
    fio.WriteMatrix(output, body, header=None)
def test_pair_dist_starting_at_zero_dist(self):
    sigma = 0.1
    test_metric = Metric(sigma, edge=1)
    positions = [[0.2, 0.2], [0.2 + 2 * sigma, 0.2]]
    self.assertAlmostEqual(
        test_metric.pair_dist(positions[0], positions[1], 10, [1, 0]),
        0, 1, "Should collide")
def invert_metric(self, g):
    """
    Invert a given metric g and return the inverted metric as a shorthand
    dictionary of sympy.core.symbols.Symbol objects.
    """
    N = int(np.sqrt(len(g.elements)))
    g_matrix = sp.MatrixSymbol('g_matrix', N, N)
    g_matrix = sp.Matrix(g_matrix)

    print('Converting the metric to sp.Matrix object...')
    for i in range(N):
        for j in range(N):
            m = g.index_dict[i]
            n = g.index_dict[j]
            mn = m + n
            g_matrix[i, j] = g.elements[mn]

    print('Inverting the matrix...')
    g_matrix_inv = g_matrix**(-1)

    g_inv = Metric(index_dict=g.index_dict)
    for i in range(N):
        for j in range(N):
            m = g_inv.index_dict[i]
            n = g_inv.index_dict[j]
            mn = m + n
            g_inv.elements[mn] = g_matrix_inv[i, j]

    print('Successfully inverted the matrix.')
    return g_inv
def metrics_lines(api_key):
    if authenticator.valid(api_key):
        metrics = bottle.request.body.readlines()
        if type(metrics) == list:
            for line in metrics:
                try:
                    parts = line.split()
                    if len(parts) != 3:
                        continue
                    else:
                        metric = {
                            'metric': parts[0],
                            'value': parts[1],
                            'timestamp': parts[2]
                        }
                        m = Metric(metric)
                        m.enqueue(q)
                except ValueError:
                    bottle.abort(400, "Invalid Metric Structure: %s" % (str(metric)))
                except Exception:
                    bottle.abort(500, "Unable to store metric!")
        else:
            bottle.abort(400, "Metric structure must be lines of 'metric.path value timestamp'")
    else:
        bottle.abort(403, "API Key not valid")
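# Illustration only (not part of the original handler): metrics_lines above reads a
# newline-delimited plaintext body with one "metric.path value timestamp" triple per
# line. A minimal client sketch; the route path, port, and API key are hypothetical,
# since they are not defined in this snippet.
def _example_post_metric_lines():
    import requests  # assumed available; any HTTP client would do
    payload = ("servers.web01.cpu.load 0.72 1431718426\n"
               "servers.web01.mem.used 1048576 1431718426\n")
    # Substitute the real route and a valid API key before use.
    requests.post("http://localhost:8080/metrics/lines/<api_key>", data=payload)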
def compute_suspicious_score(self):
    scores = []
    for i in xrange(1, self.total_stmt + 1):
        cp, cf, np, nf = self.compute_statistics(i)
        metric = Metric(cp, cf, np, nf)
        scores.append(metric.odds_ratio())
    return scores
def __init__(self, world):
    # call base
    Metric.__init__(self, world)
    self.name = "compressibility"
    self.value = 0.0
    self.format = "%.2f%%"
    self.co = zlib.compressobj()
def evaluation(data_type, trec_path, dev_query_embedding2id, passage_embedding2id, dev_I, dev_D,
               test_set="trec2019", dev_split_num=-1, split_idx=-1):
    if data_type == 0:
        if test_set == "marcodev":
            qrels = "../data/raw_data/msmarco-docdev-qrels.tsv"
        elif test_set == "trec2019":
            qrels = "../data/raw_data/2019qrels-docs.txt"
    elif data_type == 1:
        if test_set == "marcodev":
            qrels = "../data/raw_data/qrels.dev.small.tsv"
    else:
        logging.error("wrong data type")
        exit()

    trec_path = trec_path.replace(".trec", ".formatted.trec")
    met = Metric()

    if split_idx >= 0:
        split_file_path = qrels + f"{dev_split_num}_fold.split_dict"
        with open(split_file_path, 'rb') as f:
            split = pickle.load(f)
    else:
        split = None

    ndcg10 = met.get_metric(qrels, trec_path, 'ndcg_cut_10', split, split_idx)
    mrr10 = met.get_mrr(qrels, trec_path, 'mrr_cut_10', split, split_idx)
    mrr100 = met.get_mrr(qrels, trec_path, 'mrr_cut_100', split, split_idx)

    print(f" evaluation for {test_set}, trec_file {trec_path}, split_idx {split_idx} \
        ndcg_cut_10 : {ndcg10}, \
        mrr_cut_10 : {mrr10}, \
        mrr_cut_100 : {mrr100}")
    return ndcg10
def __init__(self, world):
    # call base
    Metric.__init__(self, world)
    self.name = "exploration"
    self.value = 0.0
    self.format = "%.2f%%"
    # dictionary mapping hashes to ticks used for cycle detection
    self.rule_count = np.zeros(self.world.rule.size, dtype=int)
def testAttributes(self):
    metric = Metric()
    metric.setName("foo")
    metric.setValue(10)
    metric.setSource("localhost")
    self.assertEqual(metric.getName(), "foo", "Name does not match")
    self.assertEqual(metric.getValue(), 10, "Value does not match")
    self.assertEqual(metric.getSource(), "localhost", "Source does not match")
def __init__(self, args):
    """
    Create Evaluator object which handles the evaluation of a specified model,
    the tracking of computed metrics, and saving of results.

    :param args: ArgParse object which holds the path to the experiment
        configuration file along with other key experiment options.
    """
    # get experiment configuration file
    osj = os.path.join
    oss = os.path.split
    base_path = os.path.dirname(os.path.abspath(__file__))
    config = get_config(base_path, args.config)
    self.config = config
    config_path = osj(base_path, 'config')

    # create experiment, logging, and model checkpoint directories
    model_name = os.path.split(args.model)[1].split('.')[0]
    self.eval_dir = osj(base_path, oss(args.config)[0], 'eval_' + args.data + '_' + model_name)
    self.visual_path = osj(self.eval_dir, 'visuals')
    if os.path.exists(self.eval_dir):
        print("Error: This evaluation already exists")
        print("Cancel Session: 0")
        print("Overwrite: 1")
        if input() == str(1):
            shutil.rmtree(self.eval_dir)
        else:
            exit(1)
    os.mkdir(self.eval_dir)
    os.mkdir(self.visual_path)

    # Device
    self.device = torch.device("cpu" if args.gpuid == 'cpu' else "cuda:{}".format(args.gpuid))

    # Model - load pre-trained
    model = get_model(config)
    self.model = model
    self.load_model(args.model)
    self.model.to(self.device)

    # Loss metric
    self.criterion = nn.MSELoss()

    # Dataset and DataLoader
    self.data_type = args.data
    self.dataset = ShapeNet(config, config_path, args.data)
    self.tracked_samples = extract_categories(self.dataset.samples, 5)
    self.loader = DataLoader(self.dataset,
                             batch_size=config['batch_size'],
                             shuffle=True,
                             num_workers=1,
                             drop_last=False)
    print("Commencing evaluation with {} model on {} split".format(config['model'], args.data))

    # Metrics
    self.metrics = Metric(config)
    self.metric_tracker = MetricTracker(config, self.eval_dir, args.data)
    self.epoch = 0
def __init__(self, world):
    # call base
    Metric.__init__(self, world)
    self.name = "cyclic"
    self.value = False
    # dictionary mapping hashes to ticks used for cycle detection
    self.hist = {}
    self.cycle_start = None
    self.cycle_period = None
def test_wall_dist(self):
    test_metric = Metric(sigma=0.1, edge=1)
    pos = [0.5, 0.5]
    for wall in [1, -1, 2, -2]:
        v_hat = [0, 0]
        v_hat[np.abs(wall) - 1] = np.sign(wall)
        dist_to_wall, wall_type = test_metric.wall_dist(pos, v_hat, 1)
        self.assertEqual(dist_to_wall, 0.4, "Wrong distance to wall")
        self.assertEqual(wall_type, wall, "Wrong Classification of wall")
def evaluate(self):
    test_featureset = self._get_featuresets(self.test_file)

    labels = [int(x[1]) for x in test_featureset]
    featureset = [x[0] for x in test_featureset]
    predicts = [int(x) for x in self._model.classify_many(featureset)]

    metric = Metric()
    return metric.accuracy(labels, predicts), metric.kappa(labels, predicts), metric.QWkappa(labels, predicts)
def test_metric_improvement(self):
    params = {"name": "logloss"}
    m = Metric(params)
    y_true = np.array([0, 0, 1, 1])
    y_predicted = np.array([0, 0, 0, 1])
    score_1 = m(y_true, y_predicted)
    y_true = np.array([0, 0, 1, 1])
    y_predicted = np.array([0, 0, 1, 1])
    score_2 = m(y_true, y_predicted)
    self.assertTrue(m.improvement(score_1, score_2))
def test_step_size(self):
    sigma = 0.1
    test_metric = Metric(sigma, edge=1)
    positions = [[0.2, 0.2], [0.7, 0.2]]
    current_step_size, step_type, sphere_or_wall_ind = \
        test_metric.step_size(positions, 0, [1, 1], .2)
    self.assertEqual(current_step_size, .2, "Problem with free step")
    current_step_size, step_type, sphere_or_wall_ind = \
        test_metric.step_size(positions, 0, [1, 0], 1)
    self.assertAlmostEqual(current_step_size, .3, 1, "Problem with pair collision")
def test_wall_dist_starting_at_the_wall(self):
    test_metric = Metric(sigma=0.1, edge=1)
    pos = [0.1, 0.5]
    dist_to_wall, wall_type = test_metric.wall_dist(pos, [-1, 0], 1)
    self.assertEqual(dist_to_wall, 0, "Wrong distance to wall")
    self.assertEqual(wall_type, -1, "Wrong Classification of wall")
    dist_to_wall, wall_type = test_metric.wall_dist(pos, [0, 1], 1)
    self.assertEqual(dist_to_wall, 0.4, "Wrong distance to wall")
    self.assertEqual(wall_type, 2, "Wrong Classification of wall")
def __init__(self, init_list, goal_list):
    """Initialise Solver object. Raise ValueError if solution not possible."""
    self.initial_state = copy.deepcopy(self.list_to_grid(init_list))
    self.goal_state = copy.deepcopy(self.list_to_grid(goal_list))
    self.frontier = []
    self.explored = set()
    self.metrics = Metric(self.frontier)
def test_pair_dist(self):
    sigma = 0.1
    test_metric = Metric(sigma, edge=1)
    positions = [[0.2, 0.2], [0.7, 0.2]]
    self.assertAlmostEqual(
        test_metric.pair_dist(positions[0], positions[1], 10, [1, 0]),
        0.5 - 2 * sigma, 1, "Wrong distance between pairs")
    positions = [[0.2, 0.2], [0.7, 0.7]]
    self.assertAlmostEqual(
        test_metric.pair_dist(positions[0], positions[1], 10, [0, 1]),
        float('inf'), 1, "Should not collide")
def get_QWkappa(input):
    head, body = fio.ReadMatrix(input, True)

    metric = Metric()

    data = {}
    for i, row in enumerate(body):
        for coder, label in enumerate(row):
            if label == 'a':
                label = '0'
            label = int(label)

            if head[coder] not in data:
                data[head[coder]] = []
            data[head[coder]].append(label)

    print 'annotator 1', '\t', 'annotator 2', '\t', 'accuracy', '\t', 'kappa', '\t', 'QWkappa'
    print head[0], '\t', head[1], '\t', metric.accuracy(data[head[0]], data[head[1]]), '\t', metric.kappa(data[head[0]], data[head[1]]), '\t', metric.QWkappa(data[head[0]], data[head[1]])
    print head[0], '\t', head[2], '\t', metric.accuracy(data[head[0]], data[head[2]]), '\t', metric.kappa(data[head[0]], data[head[2]]), '\t', metric.QWkappa(data[head[0]], data[head[2]])
    print head[1], '\t', head[2], '\t', metric.accuracy(data[head[1]], data[head[2]]), '\t', metric.kappa(data[head[1]], data[head[2]]), '\t', metric.QWkappa(data[head[1]], data[head[2]])
    print '', '\t', 'Average', '\t', np.mean([metric.accuracy(data[head[0]], data[head[1]]), metric.accuracy(data[head[0]], data[head[2]]), metric.accuracy(data[head[1]], data[head[2]])]), '\t',\
        np.mean([metric.kappa(data[head[0]], data[head[1]]), metric.kappa(data[head[0]], data[head[2]]), metric.kappa(data[head[1]], data[head[2]])]), '\t',\
        np.mean([metric.QWkappa(data[head[0]], data[head[1]]), metric.QWkappa(data[head[0]], data[head[2]]), metric.QWkappa(data[head[1]], data[head[2]])])

    print metric.confusion_matrix(data[head[0]], data[head[1]])

    return 0
def test_eval(self):
    iris = load_iris()
    X_train, y_train = iris.data[:120], iris.target[:120]
    X_test, y_test = iris.data[120:], iris.target[120:]
    rf = Model('rf', RandomForestClassifier, {'random_state': 0})
    rf.run(X_train, y_train)
    metrics = rf.eval(X_test, y_test, metrics=['acc'])

    rf_metric = Metric('rf')
    rf_metric.addValue('acc', 0.7666666666666667)
    expected_metrics = rf_metric.getValues()

    self.assertEqual(metrics, expected_metrics)
def list_metrics(self, start_date, end_date):
    # Create CloudWatch client
    cloudwatch = boto3.client('cloudwatch')
    metrics = []
    statistics_type = 'Average'
    period = 3600  # hour

    # List metrics through the pagination interface
    paginator = cloudwatch.get_paginator('list_metrics')
    for response in paginator.paginate(Namespace=self.namespace):
        for row in response['Metrics']:
            metric_name = row['MetricName']
            for row2 in row['Dimensions']:
                dimension_name = row2['Name']
                dimension_value = row2['Value']
                if (self.namespace == 'AWS/EC2' and metric_name in
                        ["CPUUtilization", "NetworkOut", "NetworkIn", "EBSWriteBytes", "EBSReadBytes",
                         "DiskReadBytes", "DiskWriteBytes", "DiskWriteOps", "DiskReadOps"]) \
                        or self.namespace != 'AWS/EC2':
                    metric = Metric(metric_name, self.namespace, dimension_name, dimension_value,
                                    statistics_type, period, start_date, end_date)
                    metrics.append(metric)
    return metrics
def get_info(self):
    distance = None
    try:
        date = datetime.datetime.utcnow()
        GPIO.output(self.pin_trigger, GPIO.HIGH)
        time.sleep(0.00001)
        GPIO.output(self.pin_trigger, GPIO.LOW)

        while GPIO.input(self.pin_echo) == 0:
            pulse_start_time = time.time()
        while GPIO.input(self.pin_echo) == 1:
            pulse_end_time = time.time()

        pulse_duration = pulse_end_time - pulse_start_time
        distance = round(pulse_duration * 17150, 2)
    except Exception as e:
        logger.error(f'Error in distance: {e}')
        self.reset()

    if distance is not None:
        name = self.metric_prefix + 'distance'
        if self.output == 'WF':
            name = 'Distance'
            if len(self.metric_prefix) > 0:
                name = self.metric_prefix + '.' + name
        if self.format == 'f':
            distance = distance / 30.48
        if self.format == 'i':
            distance = distance / 30.48 * 12
        self.metrics.append(Metric(name, distance, date))
def soft_nms(bboxes, threshold=0.75):
    """
    soft non-max suppression implementation for object detection.
    takes a list of bounding boxes with scores, decreases score of bounding boxes
    with respect to their IoU with the highest score bounding box
    :param bboxes: (array) list of bounding boxes in format [x1, y1, x2, y2, score]
    :param threshold: (scalar, float) minimum acceptable score for bounding box
    :return: filtered list of bounding boxes after soft non-max suppression
    """
    bboxes_nms = []
    while bboxes:
        # filter all the bboxes with score below the threshold
        bboxes = [bbox for bbox in bboxes if bbox[4] >= threshold]
        if not bboxes:
            break  # every remaining box fell below the threshold

        # grab the highest score bbox, remove it from the list and add it to output list
        maxscore_bbox = max(bboxes, key=lambda x: x[4])
        bboxes.remove(maxscore_bbox)
        bboxes_nms.append(maxscore_bbox)

        # loop over all the possible overlaps with the selected bbox and update their scores
        bbox_pairs = [(maxscore_bbox, bbox) for bbox in bboxes]
        for maxscore_bbox, bbox2 in bbox_pairs:
            bbox2[4] -= Metric.iou(maxscore_bbox, bbox2)

    return bboxes_nms
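# Illustration only (not part of the original module): a minimal usage sketch for
# soft_nms above. Boxes are plain Python lists in [x1, y1, x2, y2, score] form so
# the in-place score update works; Metric.iou is assumed to return a value in [0, 1].
def _example_soft_nms():
    boxes = [
        [10, 10, 50, 50, 0.95],
        [12, 12, 52, 52, 0.90],      # heavily overlaps the first box
        [100, 100, 140, 140, 0.85],  # disjoint from the others
    ]
    kept = soft_nms(boxes, threshold=0.75)
    # The overlapping box has its score reduced by its IoU with the top-scoring
    # box and is dropped on a later pass if it falls below the threshold.
    return kept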
def test1(n): from numpy import array # One equilibrium of Ar4 LJ cluster (in coordinates of # c2v_tetrahedron1 Func): w = 0.39685026 A = array([w, w, +w]) # Another equilibrium: B = array([w, w, -w]) # Halfway between A and B: C = (A + B) / 2.0 C = array([w + 0.01, w - 0.01, 0.0]) xs = array([A, C, B]) from test.testfuns import c2v_tetrahedron1, diagsandhight from path import MetricPath from metric import Metric from numpy import linspace z = c2v_tetrahedron1() # z = diagsandhight() # r = 1.12246195815 # A = array([r, r, r / sqrt(2.)]) # B = array([r, r, -r / sqrt(2.)]) # C = array([r, r * sqrt(2.), 0.]) # xs = array([A, C, B]) p = MetricPath(xs, Metric(z).norm_up) x0 = map(p, linspace(0., 1., n)) from ase import Atoms from qfunc import QFunc from func import compose pes = compose(QFunc(Atoms("Ar4")), z) from rc import Volume vol = compose(Volume(), z) def callback(x, e, g, t): # from pts.tools.jmol import jmol_view_path print "energies=", e # map(pes, x) print "volume=", map(vol, x) # jmol_view_path(map(z, x), syms=["Ar"]*4, refine=1) pass print "BEFORE:" callback(x0, map(pes, x0), map(pes.fprime, x0), None) x1, info = soptimize(pes, x0, tangent1, rc=vol, callback=callback) # print "info=", info print "AFTER:" callback(x1, map(pes, x1), map(pes.fprime, x1), None)
def convertDF(self, data, date):
    data = self.convertStringToList(data)
    length = len(data)
    for i in range(length):
        if i > 0:
            colLength = len(data[i])
            for c in range(colLength):
                if c > 0 and c < 4:
                    name = 'disk.' + data[0][c]
                    tag = {}
                    tag['filesystem'] = data[i][0]
                    tag['mount'] = data[i][5]
                    value = int(data[i][c])
                    metric = Metric(name, value, date)
                    metric.tags = tag
                    self.metrics.append(metric)
def train(self, data, prm):
    best_corr_de = -1e100
    best_net = None
    patient = 0
    for it in range(prm.max_epochs_num):
        Logger.write_log('iter = ' + str(it))
        self.net.fit(x=data.Xtr, y=data.Ytr, batch_size=prm.batch_size, epochs=1, verbose=1, shuffle=True)
        data_pred = self.evaluate(data, prm)
        corr_de = Metric.report_all_metrics(data_pred)
        if corr_de > best_corr_de:
            Logger.write_log('nice, model gets better ...')
            Logger.write_log('###############################')
            best_corr_de = corr_de
            best_net = copy.deepcopy(self.net)
            patient = 0
        else:
            Logger.write_log('oops, model gets worse ...')
            Logger.write_log('###############################')
            patient += 1
            if patient >= prm.max_patience:
                break
    self.net = best_net
def get_metrics_cv(datadir, files):
    metric = Metric()

    _, _, folds = load_label(datadir + files[0])

    body = []
    for file in files:
        labels, predicts, _ = load_label(datadir + file)

        row = [file]
        row += metric.cv_accuracy(labels, predicts, folds)
        row += metric.cv_kappa(labels, predicts, folds)
        row += metric.cv_QWkappa(labels, predicts, folds)

        body.append(row)

    output = datadir + 'H1_p.txt'
    print output
    fio.WriteMatrix(output, body, header=None)
def worker(self, train, test):
    '''
    :params: train, the training dataset
    :params: test, the test dataset
    :return: values of the evaluation metrics
    '''
    print('Starting train data with function: {}'.format(self.rt))
    if self.rt == 'LFM':
        getRecommendation, P, Q = self.alg[self.rt](train, self.ratio, self.K_latent, \
                                                    self.lr, self.step, self.lmbda, self.N)
    else:
        getRecommendation = self.alg[self.rt](train, self.K, self.N)
    # train
    print('Starting to calculate evaluation metrics with function: {}'.format(self.rt))
    metric = Metric(train, test, getRecommendation)
    return metric.eval()
def testString(self):
    metric = Metric()
    metric.setName("bar")
    metric.setValue(100)
    metric.setSource("snafu")
    print(str(metric))
    self.assertEqual(str(metric), "bar 100 snafu", "String does not match")
def sample_sectors(self):
    """
    Runs the process to sample the repository.
    :return:
    """
    self.__sectors = self.__generate_sectors()
    for sector in self.__sectors:
        commits_in_sector = sector.get_objects()
        commits_count = len(commits_in_sector)
        if commits_count:
            m = Metric()
            m.commit_count = commits_count
            last_commit = commits_in_sector[commits_count - 1]
            m.commit = commits_in_sector[0]
            score = self.__score(commits_in_sector[0].id.hex, last_commit.id.hex, commits_count)
            m.activity = score[0]
            m.additions = score[1]
            m.deletions = score[2]
            m.timestamp = datetime.datetime.fromtimestamp(last_commit.commit_time)
            self.__metrics.append(m)
def get_info(self):
    current_value = GPIO.input(self.pin)
    while current_value == self.initial_value:
        # do nothing, wait for a change
        current_value = GPIO.input(self.pin)
        time.sleep(0.00001)
    # changed
    self.initial_value = current_value
    self.metrics.append(Metric(self.name, current_value, datetime.datetime.utcnow()))
def get_info(self):
    date = datetime.datetime.utcnow()
    humidity, temp = Adafruit_DHT.read_retry(self.code, self.pin)

    if temp is not None:
        name = self.metric_prefix + 'humidity'
        if self.output == 'WF':
            name = 'Humidity'
            if len(self.metric_prefix) > 0:
                name = self.metric_prefix + '.' + name
        self.metrics.append(Metric(name, humidity, date))

        if self.format == 'f':
            temp = (temp * 9 / 5) + 32
        name = self.metric_prefix + 'temperature'
        if self.output == 'WF':
            name = 'Temperature'
            if len(self.metric_prefix) > 0:
                name = self.metric_prefix + '.' + name
        self.metrics.append(Metric(name, temp, date))
def createMetric(category, metric, value):
    # Build a metric type name for the given metric
    m = METRIC_TYPE + ' / ' + category + ' / ' + metric
    # Return a metric description object
    return Metric(source=METRIC_SOURCE, metric=m, resource=METRIC_RESOURCE, node=METRIC_NODE, value=value)
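# Illustration only (not part of the original module): for category "queue" and
# metric "depth", createMetric above builds the type name
# METRIC_TYPE + ' / queue / depth' and fills the remaining fields from the
# module-level METRIC_* constants.
def _example_create_metric():
    return createMetric('queue', 'depth', 42)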
def test_generate_normal(self):
    sys = SystemDataGenerator(name='test00')
    metric_vals = {'name': 'cpu_busy', 'min_val': 0, 'max_val': 10, 'mean': 10, 'sd': 3}
    metric = Metric(**metric_vals)
    metric_value = sys.generate_metric_value_normal(metric)
    assert metric_value[metric.name] > 0
def convertUptime(self, data, date): hasUser = False if "user" in data.lower().strip(): hasUser = True data = list(data.replace(",", "").split(' ')) if len(data) > 1: for i in range(len(data)): data[i] = list(filter(None, data[i].split(' '))) else: data = list(filter(None, data[0].split(' '))) data = [[data[0], data[1]], [data[2]], [data[4], data[5]], [data[6], data[7], data[8], data[9], data[10]]] #check if uptime has days uptime = 0 i = 0 if (hasUser and len(data) > 3) or (len(data) > 2 and not hasUser): if (len(data[0]) > 2): uptime += int(data[i][2]) * 24 * 60 i += 1 uptime += self.convertDurationToInt(data[i][0]) else: if len(data[i]) == 5: uptime = int(data[i][2]) * 24 * 60 uptime += self.convertDurationToInt(data[i][4]) elif len(data[i]) == 6: uptime = int(data[i][2]) * 24 * 60 uptime += int(data[i][4]) else: uptime += self.convertDurationToInt(data[i][2]) self.metrics.append(Metric("uptime", uptime, date)) i += 1 #users if hasUser: users = int(data[i][0]) self.metrics.append(Metric("users", users, date)) i += 1 #load minMean = float(data[i][2]) fiveMinMean = float(data[i][3]) fifteenMean = float(data[i][4]) self.metrics.append(Metric("load1min.mean", minMean, date)) self.metrics.append(Metric("load5min.mean", fiveMinMean, date)) self.metrics.append(Metric("load15min.mean", fifteenMean, date))
def test_create(self):
    params = {"name": "logloss"}
    m = Metric(params)
    y_true = np.array([0, 0, 1, 1])
    y_predicted = np.array([0, 0, 1, 1])
    score = m(y_true, y_predicted)
    self.assertTrue(score < 0.1)
    y_true = np.array([0, 0, 1, 1])
    y_predicted = np.array([1, 1, 0, 0])
    score = m(y_true, y_predicted)
    self.assertTrue(score > 1.0)
def __init__(self):
    self.con = Container()
    self.ret = self.con.runContainerId()
    self.dt, self.dt1, self.dt2, self.dt3, self.dt4 = {}, {}, {}, {}, {}
    for i in self.ret:
        self.con.setFilePath(i)
        self.data = Metric(self.con.container_id, self.con.cpuacct_path,
                           self.con.memStat_path, self.con.blkio_path, self.con.netStat_path)
        self.data.getNetns()
        self.dt_cpu, self.dt_mem, self.dt_blk, self.dt_net = {}, {}, {}, {}
        self.dt_cpu['cpuAcct'] = self.data.cpuAcct()
        self.dt_mem['memStat'] = self.data.memStat(i)
        self.dt_blk['blkio'] = self.data.blkio()
        self.dt_net['netStat'] = self.data.netStat()
        # we set the trunc id for the metric
        _i = i[:12]
        self.dt[_i] = [self.dt_cpu, self.dt_mem, self.dt_blk, self.dt_net]
        self.dt1[_i] = [self.dt_cpu]
        self.dt2[_i] = [self.dt_mem]
        self.dt3[_i] = [self.dt_blk]
        self.dt4[_i] = [self.dt_net]
def get_metrics_NewCourse(datadir):
    files = ['DT.txt',
             'DT_NoneZero.txt',
             'Rubric.txt',
             'Rubric_NoneZero.txt',
             ]

    metric = Metric()

    body = []
    for file in files:
        labels, predicts, _ = load_label(datadir + file)

        row = [file]
        row.append(metric.accuracy(labels, predicts))
        row.append(metric.kappa(labels, predicts))
        row.append(metric.QWkappa(labels, predicts))

        body.append(row)

    output = datadir + 'H_NewCourse.txt'
    print output
    fio.WriteMatrix(output, body, header=None)
def get_metrics_H1_CV(datadir):
    metric = Metric()

    folds = range(0, 10)

    # fix firstnode
    feature = 'quality_rubric_firstnode_'
    feature_fixed = 'quality_rubric_firstnode_fixed_'
    for fold in folds:
        input = datadir + feature + str(fold) + '_test.label'
        zero_file = datadir + feature + str(fold) + '_test_0.txt'
        output = datadir + feature_fixed + str(fold) + '_test.label'

        fix_firstnode(input, zero_file, output)

    body = []
    for feature in ['quality_WC_Unigram_',
                    'quality_WC_Unigam_NonZero_',
                    'quality_WC_Unigam_Content_',
                    'quality_WC_Unigam_OrgAssign_',
                    'quality_WC_Unigam_Speciteller_',
                    'quality_WC_Unigam_Title_',
                    'quality_rubric_',
                    'quality_rubric_firstnode_fixed_',
                    'quality_DT_',
                    ]:
        for fold in folds:
            file = feature + str(fold) + '_test.label'
            print file

            labels, predicts, _ = load_label(datadir + file)

            row = [file]
            row.append(metric.accuracy(labels, predicts))
            row.append(metric.kappa(labels, predicts))
            row.append(metric.QWkappa(labels, predicts))

            body.append(row)

    output = datadir + 'H1_cv.txt'
    print output
    fio.WriteMatrix(output, body, header=None)
def publish(api_key):
    if authenticator.valid(api_key):
        try:
            body = bottle.request.body.read()
            metrics = json.loads(body)
        except:
            log.error("Request unparsable: %s" % (body))
            bottle.abort(400, "Unable to successfully parse JSON")
        if type(metrics) == list:
            for metric in metrics:
                try:
                    m = Metric(metric)
                    m.enqueue(q)
                except ValueError:
                    bottle.abort(400, "Invalid Metric Structure: %s" % (str(metric)))
                except Exception:
                    bottle.abort(500, "Unable to store metric!")
        else:
            bottle.abort(400, "Metric structure must be <list> containing n <dict> items")
    else:
        bottle.abort(403, "API Key not valid")
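# Illustration only (not part of the original module): a minimal client sketch for
# the publish route above. It expects a JSON list of dicts, each convertible to a
# Metric; the field names mirror the dict built in metrics_lines above. The route
# path, port, and API key are hypothetical.
def _example_publish():
    import json
    import requests  # assumed available; any HTTP client would do
    payload = [
        {"metric": "servers.web01.cpu.load", "value": 0.72, "timestamp": 1431718426},
        {"metric": "servers.web01.mem.used", "value": 1048576, "timestamp": 1431718426},
    ]
    requests.post("http://localhost:8080/publish/<api_key>", data=json.dumps(payload))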
def get_metrics_H0(datadir):
    metric = Metric()

    body = []
    for feature in ['quality_rubric', 'quality_binary_model', 'quality_New', 'quality_firstnode']:
        file = feature + '.label'
        print file

        labels, predicts, _ = load_label(datadir + file)

        fio.WriteMatrix(datadir + file + '.cm', metric.confusion_matrix(labels, predicts), None)

        row = [file]
        row.append(metric.accuracy(labels, predicts))
        row.append(metric.kappa(labels, predicts))
        row.append(metric.QWkappa(labels, predicts))
        # row += metric.cv_accuracy(labels, predicts)
        # row += metric.cv_kappa(labels, predicts)
        # row += metric.cv_QWkappa(labels, predicts)

        body.append(row)

    output = datadir + 'H0.txt'
    print output
    fio.WriteMatrix(output, body, header=None)
def __init__(self, user, password, ip, port, metric_labels):
    """Generate metrics list, connect to RabbitMQ server"""
    # Add valid metrics to list
    self.metric_list = []
    for label in metric_labels:
        metric = Metric.create(label)
        if metric is not None:
            self.metric_list.append(metric)

    # Connect to RabbitMQ server
    credentials = PlainCredentials(user, password)
    basicConfig(format='%(levelname)s:%(message)s', level=CRITICAL)
    connection_parameters = ConnectionParameters(ip, port, '/', credentials)
    self.connection = BlockingConnection(connection_parameters)
    self.channel = self.connection.channel()
    self.host = gethostname()

    # Prepare request queue
    self.processing_request = False
    self.channel.exchange_declare(exchange='request', type='fanout')
    result = self.channel.queue_declare(exclusive=True)
    request_queue = result.method.queue
    self.channel.queue_bind(exchange='request', queue=request_queue)
    self.channel.basic_consume(self.request_metric, queue=request_queue, no_ack=True)

    # Prepare reply queue
    self.channel.queue_declare(queue='reply')
from metric import Metric
import sympy as sp

x = sp.symbols(['z', 't', 'x', 'y'])
gs = [sp.Symbol('g' + str(i))(x[0]) for i in x]
M = Metric(x, sp.diag(*gs))
A = [sp.Symbol(t)(M.x[0]) for t in ['Az', 'At', 'Ax', 'Ay']]
der = M.covariantVectorDerivative(A)
assert(M.g.is_diagonal())
sp.pprint(sp.simplify(sum(der[i][i] * M.ginv[i, i] for i in range(len(M.x)))))
def __init__(self, ministry, zbins=None, magbins=None, catalog_type=['galaxycatalog'], tag=None, appmag=True, lower_limit=True, cutband=None, normed=True, selection_dict=None, **kwargs): """ Angular Number density of objects as a function of redshift. inputs -------- ministry - Ministry keywords ------ zbins - np.array An array containing the edges of the redshift bins to use for dn/dz lower_limit - boolean Whether or not the magnitudes in magbins should be interpreted as bin edges or lower limits of cuts (i.e. mag<magbins[i]) magbins - np.array A list of magnitudes to use as bin edges or lower limits of cuts catalog_type - list A list of catalog types, ususally not used tag - string A name for the metric to be used when making comparisons using bb-compare appmag - boolean True if we want to use apparent magnitude for cuts, false for absolute magnitudes cutband - int The index of the column of a vector of magnitudes to use normed - boolean Whether the metric integrates to N/deg^2 or not. Usually want True. """ Metric.__init__(self, ministry, tag=tag, **kwargs) self.catalog_type = catalog_type if zbins is None: self.zbins = np.linspace(0, 2.0, 61) else: self.zbins = zbins self.zbins = np.array(self.zbins) self.nzbins = len(self.zbins)-1 if appmag: self.mkey = 'appmag' defmbins = np.array([18, 19, 20, 21]) else: self.mkey = 'luminosity' defmbins = np.array([-24, -23, -22, -21]) if cutband is None: self.cutband = 0 else: self.cutband = cutband self.lower_limit = lower_limit if magbins is None: self.magbins = [None] self.nmagbins = 0 self.nomags = True else: self.nomags = False self.magbins = magbins if self.lower_limit: self.nmagbins = len(self.magbins) else: self.nmagbins = len(self.magbins) - 1 self.normed = normed self.aschema = 'galaxyonly' if self.nmagbins > 0: self.mapkeys = [self.mkey, 'redshift'] self.unitmap = {self.mkey :'mag'} else: self.mapkeys = ['redshift'] self.unitmap = {} #Make selection dict here if (selection_dict is None) & lower_limit: selection_dict = {'mag':{'selection_type':'cut1d', 'mapkeys':['appmag'], 'bins':self.magbins, 'selection_ind':self.cutband, 'lower':True}} elif (selection_dict is None): selection_dict = {'mag':{'selection_type':'binned1d', 'mapkeys':['appmag'], 'bins':self.magbins, 'selection_ind':self.cutband}} nmkeys = [] for s in selection_dict: if 'mapkeys' in selection_dict[s]: ss = selection_dict[s] for m in ss['mapkeys']: if m not in self.mapkeys: self.mapkeys.append(m) if m not in self.unitmap: self.unitmap[m] = self.defaultUnits(m) self.zcounts = None self.selector = Selector(selection_dict)
class Traffic: """ Traffic class definition : Traffic data Methods: Traffic(tmx) : constructor create(acid,actype,aclat,aclon,achdg,acalt,acspd) : create aircraft delete(acid) : delete an aircraft from traffic data update(sim) : do a numerical integration step findnearest(lat,lon) : find nearest a/c to lat/lon position trafperf () : calculate aircraft performance parameters Members: see create Created by : Jacco M. Hoekstra """ def __init__(self, tmx): self.tmx = tmx # tmx object contains sim, scr and other main objects self.dts = [] self.ntraf = 0 # model-specific parameters. # Default: BlueSky internal performance model. # Insert your BADA files to the folder "BlueSky/data/coefficients/BADA" # for working with EUROCONTROL`s Base of Aircraft Data revision 3.12 # Check for BADA OPF file path = os.path.dirname(__file__) + '/../../data/coefficients/BADA/' files = os.listdir(path) self.bada = False for f in files: if f.upper().find(".OPF")!=-1: self.bada=True break # Initialize correct performance models if self.bada: self.perf = PerfBADA(self) else: self.perf = Perf(self) self.dts = [] self.ntraf = 0 # Create datalog instance self.log = Datalog() # Traffic list & arrays definition # !!!IMPORTANT NOTE!!! # Anny variables added here should also be added in the Traffic # methods self.create() (append) and self.delete() (delete) # which can be found directly below __init__ # Traffic basic flight data # Traffic basic flight data self.id = [] # identifier (string) self.type = [] # aircaft type (string) self.lat = np.array([]) # latitude [deg] self.lon = np.array([]) # longitude [deg] self.trk = np.array([]) # track angle [deg] self.tas = np.array([]) # true airspeed [m/s] self.gs = np.array([]) # ground speed [m/s] self.cas = np.array([]) # callibrated airspeed [m/s] self.M = np.array([]) # mach number self.alt = np.array([]) # altitude [m] self.fll = np.array([]) # flight level [ft/100] self.vs = np.array([]) # vertical speed [m/s] self.rho = np.array([]) # atmospheric air density [m/s] self.temp = np.array([]) # atmospheric air temperature [K] self.dtemp = np.array([]) # delta t for non-ISA conditions # Traffic performance data self.avsdef = np.array([]) # [m/s]default vertical speed of autopilot self.aphi = np.array([]) # [rad] bank angle setting of autopilot self.ax = np.array([]) # [m/s2] absolute value of longitudinal accelleration self.bank = np.array([]) # nominal bank angle, [radian] self.bphase = np.array([]) # standard bank angles per phase self.hdgsel = np.array([]) # determines whether aircraft is turning # Crossover altitude self.abco = np.array([]) self.belco = np.array([]) # Traffic autopilot settings self.ahdg = [] # selected heading [deg] self.aspd = [] # selected spd(eas) [m/s] self.aptas = [] # just for initializing self.ama = [] # selected spd above crossover altitude (Mach) [-] self.aalt = [] # selected alt[m] self.afll = [] # selected fl [ft/100] self.avs = [] # selected vertical speed [m/s] # limit settings self.lspd = [] # limit speed self.lalt = [] # limit altitude self.lvs = [] # limit vertical speed due to thrust limitation # Traffic navigation information self.orig = [] # Four letter code of origin airport self.dest = [] # Four letter code of destination airport # LNAV route navigation self.swlnav = np.array([]) # Lateral (HDG) based on nav? 
self.swvnav = np.array([]) # Vertical/longitudinal (ALT+SPD) based on nav info self.actwplat = np.array([]) # Active WP latitude self.actwplon = np.array([]) # Active WP longitude self.actwpalt = np.array([]) # Active WP altitude to arrive at self.actwpspd = np.array([]) # Active WP speed self.actwpturn = np.array([]) # Distance when to turn to next waypoint # VNAV cruise level self.crzalt = np.array([]) # Cruise altitude[m] # Route info self.route = [] # ASAS info per aircraft: self.iconf = [] # index in 'conflicting' aircraft database self.asasactive = np.array([]) # whether the autopilot follows ASAS or not self.asashdg = np.array([]) # heading provided by the ASAS [deg] self.asasspd = np.array([]) # speed provided by the ASAS (eas) [m/s] self.asasalt = np.array([]) # speed alt by the ASAS [m] self.asasvsp = np.array([]) # speed vspeed by the ASAS [m/s] self.desalt = np.array([]) #desired altitude [m] self.deshdg =np.array([]) #desired heading self.desvs =np.array([]) #desired vertical speed [m/s] self.desspd =np.array([]) #desired speed [m/s] # Display information on label self.label = [] # Text and bitmap of traffic label self.trailcol = [] # Trail color: default 'Blue' # Area self.inside = [] # Transmitted data to other aircraft due to truncated effect self.adsbtime=np.array([]) self.adsblat=np.array([]) self.adsblon=np.array([]) self.adsbalt=np.array([]) self.adsbtrk=np.array([]) self.adsbtas=np.array([]) self.adsbgs=np.array([]) self.adsbvs=np.array([]) #----------------------------------------------------------------------------- # Not per aircraft data # Scheduling of FMS and ASAS self.t0fms = -999. # last time fms was called self.dtfms = 1.01 # interval for fms self.t0asas = -999. # last time ASAS was called self.dtasas = 1.00 # interval for ASAS # Flight performance scheduling self.perfdt = 0.1 # [s] update interval of performance limits self.perft0 = -self.perfdt # [s] last time checked (in terms of sim.t) self.warned2 = False # Flag: Did we warn for default engine parameters yet? # ASAS objects: Conflict Database self.dbconf = Dbconf(self,300., 5.*nm, 1000.*ft) # hard coded values to be replaced # Import navigation data base self.navdb = Navdatabase("global") # Read nav data from global folder # Traffic area: delete traffic when it leaves this area (so not when outside) self.swarea = False self.arealat0 = 0.0 # [deg] lower latitude defining area self.arealat1 = 0.0 # [deg] upper latitude defining area self.arealat0 = 0.0 # [deg] lower longitude defining area self.arealat1 = 0.0 # [deg] upper longitude defining area self.areafloor = -999999.0 # [m] Delete when descending through this h self.areadt = 5.0 # [s] frequency of area check (simtime) self.areat0 = -100. 
# last time checked # Taxi switch self.swtaxi = False # Default OFF: delete traffic below 1500 ft # Research Area ("Square" for Square, "Circle" for Circle area) self.area = "" # Metrics self.metricSwitch = 0 self.metric = Metric() # Bread crumbs for trails self.lastlat = [] self.lastlon = [] self.lasttim = [] self.trails = Trails() self.swtrails = False # Default switched off # ADS-B Coverage area self.swAdsbCoverage = False # Noise (turbulence, ADBS-transmission noise, ADSB-truncated effect) self.setNoise(False) self.eps = np.array([]) return def create(self, acid, actype, aclat, aclon, achdg, acalt, acspd): """Create an aircraft""" # Check if not already exist if self.id.count(acid.upper()) > 0: return # already exists do nothing # Increase number of aircraft self.ntraf = self.ntraf + 1 # Process input self.id.append(acid.upper()) self.type.append(actype) self.lat = np.append(self.lat, aclat) self.lon = np.append(self.lon, aclon) self.trk = np.append(self.trk, achdg) # TBD: add conversion hdg => trk self.alt = np.append(self.alt, acalt) self.fll = np.append(self.fll, (acalt)/100) self.vs = np.append(self.vs, 0.) self.rho = np.append(self.rho, density(acalt)) self.temp = np.append(self.temp, temp(acalt)) self.dtemp = np.append(self.dtemp, 0) # at the moment just ISA conditions self.tas = np.append(self.tas, acspd) self.gs = np.append(self.gs, acspd) self.cas = np.append(self.cas, tas2cas(acspd, acalt)) self.M = np.append (self.M, tas2mach(acspd, acalt)) # AC is initialized with neutral max bank angle self.bank = np.append(self.bank, 25.) if self.ntraf<2: self.bphase = np.deg2rad(np.array([15,35,35,35,15,45])) self.hdgsel = np.append(self.hdgsel, False) #------------------------------Performance data-------------------------------- # Type specific data #(temporarily default values) self.avsdef = np.append(self.avsdef, 1500. * fpm) # default vertical speed of autopilot self.aphi = np.append(self.aphi, radians(25.)) # bank angle setting of autopilot self.ax = np.append(self.ax, kts) # absolute value of longitudinal accelleration # Crossover altitude self.abco = np.append(self.abco, 0) self.belco = np.append(self.belco, 1) # performance data self.perf.create(actype) # Traffic autopilot settings: hdg[deg], spd (CAS,m/s), alt[m], vspd[m/s] self.ahdg = np.append(self.ahdg, achdg) # selected heading [deg] self.aspd = np.append(self.aspd, tas2eas(acspd, acalt)) # selected spd(eas) [m/s] self.aptas = np.append(self.aptas, vcas2tas(self.aspd, self.alt)) # [m/s] self.ama = np.append(self.ama, 0.) # selected spd above crossover (Mach) [-] self.aalt = np.append(self.aalt, acalt) # selected alt[m] self.afll = np.append(self.afll, (acalt/100)) # selected fl[ft/100] self.avs = np.append(self.avs, 0.) # selected vertical speed [m/s] # limit settings: initialize with 0 self.lspd = np.append(self.lspd, 0.0) self.lalt = np.append(self.lalt, 0.0) self.lvs = np.append(self.lvs, 0.0) # Traffic navigation information self.dest.append("") self.orig.append("") # LNAV route navigation self.swlnav = np.append(self.swlnav, False) # Lateral (HDG) based on nav self.swvnav = np.append(self.swvnav, False) # Vertical/longitudinal (ALT+SPD) based on nav info self.actwplat = np.append(self.actwplat, 89.99) # Active WP latitude self.actwplon = np.append(self.actwplon, 0.0) # Active WP longitude self.actwpalt = np.append(self.actwpalt, 0.0) # Active WP altitude self.actwpspd = np.append(self.actwpspd, -999.) 
# Active WP speed self.actwpturn = np.append(self.actwpturn, 1.0) # Distance to active waypoint where to turn # VNAV cruise level self.crzalt = np.append(self.crzalt,-999.) # Cruise altitude[m] <0=None # Route info self.route.append(Route(self.navdb)) # create empty route connected with nav databse # ASAS info: no conflict => -1 self.iconf.append(-1) # index in 'conflicting' aircraft database self.asasactive = np.append(self.asasactive, False) self.asashdg = np.append(self.asashdg, achdg) self.asasspd = np.append(self.asasspd, tas2eas(acspd, acalt)) self.asasalt = np.append(self.asasalt, acalt) self.asasvsp = np.append(self.asasvsp, 0.) # Area variable set to False to avoid deletion upon creation outside self.inside.append(False) # Display information on label self.label.append(['', '', '', 0]) # Bread crumbs for trails self.trailcol.append(self.trails.defcolor) self.lastlat = np.append(self.lastlat, aclat) self.lastlon = np.append(self.lastlon, aclon) self.lasttim = np.append(self.lasttim, 0.0) # ADS-B Coverage area self.swAdsbCoverage = False # Transmitted data to other aircraft due to truncated effect self.adsbtime=np.append(self.adsbtime,np.random.rand(self.trunctime)) self.adsblat=np.append(self.adsblat,aclat) self.adsblon=np.append(self.adsblon,aclon) self.adsbalt=np.append(self.adsbalt,acalt) self.adsbtrk=np.append(self.adsbtrk,achdg) self.adsbtas=np.append(self.adsbtas,acspd) self.adsbgs=np.append(self.adsbgs,acspd) self.adsbvs=np.append(self.adsbvs,0.) self.eps = np.append(self.eps, 0.01) return def delete(self, acid): """Delete an aircraft""" try: # prevent error due to not found idx = self.id.index(acid) except: return False del self.id[idx] del self.type[idx] # Traffic basic data self.lat = np.delete(self.lat, idx) self.lon = np.delete(self.lon, idx) self.trk = np.delete(self.trk, idx) self.alt = np.delete(self.alt, idx) self.fll = np.delete(self.fll, idx) self.vs = np.delete(self.vs, idx) self.tas = np.delete(self.tas, idx) self.gs = np.delete(self.gs, idx) self.cas = np.delete(self.cas, idx) self.M = np.delete(self.M, idx) self.T = np.delete(self.T, idx) self.p = np.delete(self.p, idx) self.rho = np.delete(self.rho, idx) self.temp = np.delete(self.temp, idx) self.dtemp = np.delete(self.dtemp, idx) self.hdgsel = np.delete(self.hdgsel, idx) self.bank = np.delete(self.bank, idx) # Crossover altitude self.abco = np.delete(self.abco, idx) self.belco = np.delete(self.belco, idx) # Type specific data (temporarily default values) self.avsdef = np.delete(self.avsdef, idx) self.aphi = np.delete(self.aphi, idx) self.ax = np.delete(self.ax, idx) # performance data self.perf.delete(idx) # Traffic autopilot settings: hdg[deg], spd (CAS,m/s), alt[m], vspd[m/s] self.ahdg = np.delete(self.ahdg, idx) self.aspd = np.delete(self.aspd, idx) self.ama = np.delete(self.ama, idx) self.aptas = np.delete(self.aptas, idx) self.aalt = np.delete(self.aalt, idx) self.afll = np.delete(self.afll, idx) self.avs = np.delete(self.avs, idx) # limit settings self.lspd = np.delete(self.lspd, idx) self.lalt = np.delete(self.lalt, idx) self.lvs = np.delete(self.lvs, idx) # Traffic navigation variables del self.dest[idx] del self.orig[idx] self.swlnav = np.delete(self.swlnav, idx) self.swvnav = np.delete(self.swvnav, idx) self.actwplat = np.delete(self.actwplat, idx) self.actwplon = np.delete(self.actwplon, idx) self.actwpalt = np.delete(self.actwpalt, idx) self.actwpspd = np.delete(self.actwpspd, idx) self.actwpturn = np.delete(self.actwpturn, idx) # VNAV cruise level self.crzalt = np.delete(self.crzalt, idx) # 
Route info del self.route[idx] # ASAS info del self.iconf[idx] self.asasactive=np.delete(self.asasactive, idx) self.asashdg=np.delete(self.asashdg, idx) self.asasspd=np.delete(self.asasspd, idx) self.asasalt=np.delete(self.asasalt, idx) self.asasvsp=np.delete(self.asasvsp, idx) self.desalt=np.delete(self.desalt, idx) self.desvs=np.delete(self.desvs, idx) self.desspd=np.delete(self.desspd, idx) self.deshdg=np.delete(self.deshdg, idx) # Metrics, area del self.inside[idx] # Traffic display data: label del self.label[idx] # Delete bread crumb data self.lastlat = np.delete(self.lastlat, idx) self.lastlon = np.delete(self.lastlon, idx) self.lasttim = np.delete(self.lasttim, idx) del self.trailcol[idx] # Transmitted data to other aircraft due to truncated effect self.adsbtime=np.delete(self.adsbtime,idx) self.adsblat=np.delete(self.adsblat,idx) self.adsblon=np.delete(self.adsblon,idx) self.adsbalt=np.delete(self.adsbalt,idx) self.adsbtrk=np.delete(self.adsbtrk,idx) self.adsbtas=np.delete(self.adsbtas,idx) self.adsbgs=np.delete(self.adsbgs,idx) self.adsbvs=np.delete(self.adsbvs,idx) # Decrease number fo aircraft self.ntraf = self.ntraf - 1 self.eps = np.delete(self.eps, idx) return True def deleteall(self): """Clear traffic buffer""" ndel = self.ntraf for i in range(ndel): self.delete(self.id[-1]) self.ntraf = 0 self.dbconf.reset() self.perf.reset return def update(self): """Sim and command objects quick access""" sim = self.tmx.sim cmd = self.tmx.cmd if (sim.mode == sim.op and sim.dt > 0.0 and self.ntraf > 0): self.dts.append(sim.dt) #---------------- Atmosphere ---------------- self.T, self.rho, self.p = vatmos(self.alt) #-------------- Performance limits autopilot settings -------------- # Check difference with AP settings for trafperf and autopilot self.delalt = self.aalt - self.alt # [m] # below crossover altitude: CAS=const, above crossover altitude: MA = const # aptas hast to be calculated before delspd self.aptas = vcas2tas(self.aspd, self.alt)*self.belco + vmach2tas(self.ama, self.alt)*self.abco self.delspd = self.aptas - self.tas ############################################################################### # Debugging: add 10000 random aircraft # if sim.t>1.0 and self.ntraf<1000: # for i in range(10000): # acid="KL"+str(i) # aclat = random.random()*180.-90. # aclon = random.random()*360.-180. # achdg = random.random()*360. # acalt = (random.random()*18000.+2000.)*0.3048 # self.create(acid,'B747',aclat,aclon,achdg,acalt,350.) 
# ################################################################################# #-------------------- ADSB update: -------------------- self.adsbtime+=sim.dt ADSB_update=np.where(self.adsbtime>self.trunctime) if not self.ADSBtrunc: ADSB_update=range(self.ntraf) for i in ADSB_update: self.adsbtime[i]-=self.trunctime self.adsblat[i]=self.lat[i] self.adsblon[i]=self.lon[i] self.adsbalt[i]=self.alt[i] self.adsbtrk[i]=self.trk[i] self.adsbtas[i]=self.tas[i] self.adsbgs[i]=self.gs[i] self.adsbvs[i]=self.vs[i] #------------------- ASAS update: --------------------- # Scheduling: when dt has passed or restart: if self.t0asas+self.dtasas<sim.t or sim.t<self.t0asas \ and self.dbconf.swasas: self.t0asas = sim.t # Save old result iconf0 = np.array(self.iconf) # Call with traffic database and sim data self.dbconf.cd_state(sim) self.dbconf.cr_eby(sim) self.dbconf.APorASAS(sim) # Reset label because of colour change chnged = np.where(iconf0!=np.array(self.iconf))[0] for i in chnged: self.label[i]=[" "," ", ""," "] #----------------- FMS GUIDANCE & NAVIGATION ------------------ # Scheduling: when dt has passed or restart: if self.t0fms+self.dtfms<sim.t or sim.t<self.t0fms: self.t0fms = sim.t # FMS LNAV mode: qdr, dist = qdrdist(self.lat, self.lon, self.actwplat, self.actwplon) #[deg][nm] # Check whether shift based dist [nm] is required, set closer than WP turn distance iwpclose = np.where(self.swlnav*(dist < self.actwpturn))[0] # Shift for aircraft i where necessary for i in iwpclose: # Get next wp (lnavon = False if no more waypoints) lat, lon, alt, spd, xtoalt, toalt, lnavon = \ self.route[i].getnextwp() # note: xtoalt,toalt in [m] # End of route/no more waypoints: switch off LNAV if not lnavon: self.swlnav[i] = False # Drop LNAV at end of route # In case of no LNAV, do not allow VNAV mode on it sown if not self.swlnav[i]: self.swvnav[i] = False self.actwplat[i] = lat self.actwplon[i] = lon # User entered altitude if alt >= 0.: self.actwpalt[i] = alt # VNAV calculated altitude is available and active if toalt >= 0. and self.swvnav[i]: # Descent VNAV mode (T/D logic) if self.alt[i]>toalt+10.*ft: # Descent part is in this range of waypoints: # Flat earth distance to next wp dy = (lat-self.lat[i]) dx = (lon-self.lon[i])*cos(radians(lat)) dist2wp = 60.*nm*sqrt(dx*dx+dy*dy) steepness = 3000.*ft/(10.*nm) # 1:3 rule of thumb for now maxaltwp = toalt + xtoalt*steepness # max allowed altitude at next wp self.actwpalt[i] = min(self.alt[i],maxaltwp) #To descend now or descend later? if maxaltwp<self.alt[i]: # if descent is necessary with maximum steepness self.aalt[i] = self.actwpalt[i] # dial in altitude of next waypoint as calculated t2go = max(0.1,dist2wp)/max(0.01,self.gs[i]) self.avs[i] = (self.actwpalt[i] - self.alt[i])/t2go else: print "else 1" pass # TBD # Climb VNAV mode: climb as soon as possible (T/C logic) elif self.swvnav[i] and self.alt[i]<toalt-10.*ft: # Flat earth distance to next wp dy = (lat-self.lat[i]) dx = (lon-self.lon[i])*cos(radians(lat)) dist2wp = 60.*nm*sqrt(dx*dx+dy*dy) self.aalt[i] = self.actwpalt[i] # dial in altitude of next waypoint as calculated t2go = max(0.1,dist2wp)/max(0.01,self.gs[i]) self.avs[i] = (self.actwpalt[i] - self.alt[i])/t2go if spd>0. and lnavon and self.swvnav[i]: if spd<2.0: self.actwpspd[i] = mach2cas(spd,self.alt[i]) else: self.actwpspd[i] = cas2tas(spd,self.alt[i]) else: self.actwpspd[i] = -999. 
# Calculate distance before waypoint where to start the turn # Turn radius: R = V2 tan phi / g # Distance to turn: wpturn = R * tan (1/2 delhdg) but max 4 times radius turnrad = self.tas[i]*self.tas[i]/tan(self.bank[i]) /g0 /nm # [nm] default bank angle per flight phase # print turnrad dy = (self.actwplat[i]-self.lat[i]) dx = (self.actwplon[i]-self.lon[i])*cos(radians(self.lat[i])) qdr[i] = degrees(atan2(dx,dy)) self.actwpturn[i] = max(3.,abs(turnrad*tan(radians(0.5*degto180(qdr[i]- \ self.route[i].wpdirfrom[self.route[i].iactwp]))))) # [nm] # Set headings based on swlnav self.ahdg = np.where(self.swlnav, qdr, self.ahdg) # NOISE: Turbulence if self.turbulence: timescale=np.sqrt(sim.dt) trkrad=np.radians(self.trk) #write turbulences in array turb=np.array(self.standardturbulence) turb=np.where(turb>1e-6,turb,1e-6) #horizontal flight direction turbhf=np.random.normal(0,turb[0]*timescale,self.ntraf) #[m] #horizontal wing direction turbhw=np.random.normal(0,turb[1]*timescale,self.ntraf) #[m] #vertical direction turbalt=np.random.normal(0,turb[2]*timescale,self.ntraf) #[m] #latitudinal, longitudinal direction turblat=np.cos(trkrad)*turbhf-np.sin(trkrad)*turbhw #[m] turblon=np.sin(trkrad)*turbhf+np.cos(trkrad)*turbhw #[m] else: turbalt=np.zeros(self.ntraf) #[m] turblat=np.zeros(self.ntraf) #[m] turblon=np.zeros(self.ntraf) #[m] # ASAS AP switches #--------- Select Autopilot settings to follow: destination or ASAS ---------- # desired autopilot settings due to ASAS self.deshdg = self.asasactive*self.asashdg+(1-self.asasactive)*self.ahdg self.desspd = self.asasactive*self.asasspd+(1-self.asasactive)*self.aspd self.desalt = self.asasactive*self.asasalt+(1-self.asasactive)*self.aalt self.desvs = self.asasactive*self.asasvsp+(1-self.asasactive)*self.avs # check for the flight envelope self.perf.limits() # update autopilot settings with values within the flight envelope # speed self.aspd = (self.lspd ==0)*self.desspd + (self.lspd!=0)*self.lspd # altitude self.aalt = (self.lalt ==0)*self.desalt + (self.lalt!=0)*self.lalt # hdg self.ahdg = self.deshdg # vs self.avs = (self.lvs==0)*self.desvs + (self.lvs!=0)*self.lvs # below crossover altitude: CAS=const, above crossover altitude: MA = const #climb/descend above crossover: Ma = const, else CAS = const #ama is fixed when above crossover check = self.abco*(self.ama == 0.) swma = np.where(check==True) self.ama[swma] = cas2mach(self.aspd[swma], self.alt[swma]) # ama is deleted when below crossover check2 = self.belco*(self.ama!=0.) swma2 = np.where(check2==True) self.ama[swma2] = 0. #---------- Basic Autopilot modes ---------- # SPD HOLD/SEL mode: aspd = autopilot selected speed (first only eas) # for information: self.aptas = (self.actwpspd>0.)*self.actwpspd + \ (self.actwpspd<=0.)*self.aptas self.delspd = self.aptas - self.tas swspdsel = np.abs(self.delspd) > 0.4 # <1 kts = 0.514444 m/s ax = np.minimum(abs(self.delspd / sim.dt), self.ax) self.tas = swspdsel * (self.tas + ax * np.sign(self.delspd) * \ sim.dt) + (1. - swspdsel) * self.aptas # print "DELSPD", self.delspd/sim.dt, "AX", self.ax, "SELECTED", ax # without that part: non-accelerating ac would have TAS = 0 # print "1-sw", (1. 
- swspdsel) * self.aptas # print "NEW TAS", self.tas # Speed conversions self.cas = vtas2cas(self.tas, self.alt) self.gs = self.tas self.M = vtas2mach(self.tas, self.alt) # Update performance every self.perfdt seconds if abs(sim.t - self.perft0) >= self.perfdt: self.perft0 = sim.t self.perf.perf() # update altitude self.eps = np.array(self.ntraf * [0.01]) # almost zero for misc purposes swaltsel = np.abs(self.aalt-self.alt) > \ np.maximum(3.,np.abs(2. * sim.dt * np.abs(self.vs))) # 3.[m] = 10 [ft] eps alt # print swaltsel self.vs = swaltsel*((1-self.swvnav)*np.abs(1500./60.*ft) + \ self.swvnav*np.abs(self.avs)*np.sign(self.aalt-self.alt)) self.alt = swaltsel * (self.alt + self.vs * sim.dt) + \ (1. - swaltsel) * self.aalt + turbalt # HDG HOLD/SEL mode: ahdg = ap selected heading delhdg = (self.ahdg - self.trk + 180.) % 360 - 180. #[deg] # print delhdg # omega = np.degrees(g0 * np.tan(self.aphi) / \ # np.maximum(self.tas, self.eps)) # nominal bank angles per phase from BADA 3.12 omega = np.degrees(g0 * np.tan(self.bank) / \ np.maximum(self.tas, self.eps)) self.hdgsel = np.abs(delhdg) > np.abs(2. * sim.dt * omega) self.trk = (self.trk + sim.dt * omega * self.hdgsel * np.sign(delhdg)) % 360. #--------- Kinematics: update lat,lon,alt ---------- ds = sim.dt * self.gs self.lat = self.lat + np.degrees(ds * np.cos(np.radians(self.trk)+turblat) \ / Rearth) self.lon = self.lon + np.degrees(ds * np.sin(np.radians(self.trk)+turblon) \ / np.cos(np.radians(self.lat)) / Rearth) # Update trails when switched on if self.swtrails: self.trails.update(sim.t, self.lat, self.lon, self.lastlat, self.lastlon, self.lasttim, self.id, self.trailcol) else: self.lastlat = self.lat self.lastlon = self.lon self.lattime = sim.t # Update metrics if self.metricSwitch == 1: self.metric.update(self, sim, cmd) # ----------------AREA check---------------- # Update area once per areadt seconds: if self.swarea and abs(sim.t - self.areat0) > self.areadt: # Update loop timer self.areat0 = sim.t # Chekc all aicraft for i in xrange(self.ntraf): # Current status if self.area == "Square": inside = self.arealat0 <= self.lat[i] <= self.arealat1 and \ self.arealon0 <= self.lon[i] <= self.arealon1 and \ self.alt[i] >= self.areafloor and \ (self.alt[i] >= 1500 or self.swtaxi) elif self.area == "Circle": ## Average of lat latavg = (radians(self.lat[i]) + radians(self.metric.fir_circle_point[0])) / 2 cosdlat = (cos(latavg)) # Distance x to centroid dx = (self.lon[i] - self.metric.fir_circle_point[1]) * cosdlat * 60 dx2 = dx * dx # Distance y to centroid dy = self.lat[i] - self.metric.fir_circle_point[0] dy2 = dy * dy * 3600 # Radius squared r2 = self.metric.fir_circle_radius * self.metric.fir_circle_radius # Inside if smaller inside = (dx2 + dy2) < r2 # Compare with previous: when leaving area: delete command if self.inside[i] and not inside: cmd.stack("DEL " + self.id[i]) # Update area status self.inside[i] = inside return def findnearest(self, lat, lon): """Find nearest aircraft""" if self.ntraf > 0: d2 = (lat - self.lat) ** 2 + cos(radians(lat)) * (lon - self.lon) ** 2 idx = np.argmin(d2) del d2 return idx else: return -1 def id2idx(self, acid): """Find index of aircraft id""" try: return self.id.index(acid.upper()) except: return -1 def changeTrailColor(self, color, idx): """Change color of aircraft trail""" # print color # print idx # print " " + str(self.trails.colorsOfAC[idx]) self.trailcol[idx] = self.trails.colorList[color] # print " " + str(self.trails.colorsOfAC[idx]) return def setNoise(self,A): """Noise (turbulence, 
ADBS-transmission noise, ADSB-truncated effect)""" self.noise=A self.trunctime=1 # seconds self.transerror = [1,100, 100 * ft] #[degree,m,m] standard bearing, distance, altitude error self.standardturbulence = [0,0.1,0.1] #m/s standard turbulence (nonnegative) # in (horizontal flight direction, horizontal wing direction, vertical) if self.noise: self.turbulence=True self.ADSBtransnoise=True self.ADSBtrunc=True else: self.turbulence=False self.ADSBtransnoise=False self.ADSBtrunc=False return def engchange (self, acid, engid): """Change of engines""" self.perf.engchange(acid, engid) return
def __init__(self, name):
    Metric.__init__(self, name)
    self._count = 0
    return
def __init__(self, name, fn):
    Metric.__init__(self, name)
    self._fn = fn
    return