def process(args):
    """
    General procedure of analysis.

    Parameters
    ----------
    args : argparse.Namespace
        Arguments from command line input.
    """
    analyzer = Analyzer(args.workload_conf_file)
    if args.offline:
        process_offline_data(args, analyzer)
    else:
        # comparison already yields a bool; no need for a conditional expression
        strict = args.fense_type == 'gmm-strict'
        analyzer.build_model(args.util_file, args.metric_file,
                             args.thresh, strict, args.verbose)
def test_load_and_save():
    """
    Round-trip full and thin analyzer instances through save/load and
    verify that the two reloaded copies carry identical content.
    """
    data_dir = os.path.join(os.path.dirname(__file__), 'data/1_example')

    # --- full analyzer: load, then save with images stored in a directory ---
    full_analyzer = Analyzer.load(os.path.join(data_dir, 'analyzer_full.p'))
    full_tmp_dir = os.path.join(data_dir, 'tmp_full')
    shutil.rmtree(full_tmp_dir, ignore_errors=True)  # drop leftovers from earlier runs
    output_file_full = os.path.join(full_tmp_dir, 'analyzer.p')
    full_analyzer.save(output_file_full,
                       save_thin_instance=False,
                       save_images_in_dir=True,
                       image_name_template='{:08d}.png')

    # --- thin analyzer: load (lossless png, not jpg), save thin + image dir ---
    thin_analyzer = Analyzer.load(os.path.join(data_dir, 'analyzer_thin.p'),
                                  load_images_from_dir=True,
                                  sfx='png')  # use lossless png image, not jpg
    thin_tmp_dir = os.path.join(data_dir, 'tmp_thin')
    shutil.rmtree(thin_tmp_dir, ignore_errors=True)  # drop leftovers from earlier runs
    output_file_thin = os.path.join(thin_tmp_dir, 'analyzer.p')
    thin_analyzer.save(output_file_thin,
                       save_thin_instance=True,
                       save_images_in_dir=True,
                       image_name_template='{:08d}.png')

    # --- reload both saved analyzers and compare their content ---
    reloaded_full = Analyzer.load(output_file_full)
    reloaded_thin = Analyzer.load(output_file_thin,
                                  load_images_from_dir=True,
                                  sfx='png')
    assert compare_instances(reloaded_full, reloaded_thin)
def load_test_analyzer(data_dir='ILSVRC2015_00078000'): """ Load analyzer object from saved test data. Parameters ---------- data_dir : str, optional Data directory name, should be name of one of the folders inside analyze/tests/data/. Returns ------- anaylzer : Analyzer Analyzer object. analyzer_root_dir : str Path of analyzer root directory, such that full images path is the concatenation of analyzer_root_dir and analyzer.data image_path. """ # load reference analyzer base_dir = os.path.dirname(__file__) relative_data_dir = os.path.join('data', data_dir) data_dir = os.path.join(base_dir, relative_data_dir) analyzer_file = os.path.join(data_dir, 'analyzer.p') analyzer = Analyzer.load(analyzer_file, load_images_from_dir=False) analyzer_root_dir = os.path.join(base_dir, '..') return analyzer, analyzer_root_dir
def test_visualize_example():
    """Render one frame's visualization and compare it to the stored reference image."""
    base_dir = os.path.dirname(__file__)
    data_dir = os.path.join(base_dir, 'data/ILSVRC2015_00078000')

    # the analyzer stores image paths relative to the package root
    os.chdir(os.path.abspath(os.path.join(base_dir, '..')))

    # load analyzer with images read back from disk
    analyzer = Analyzer.load(os.path.join(data_dir, 'analyzer.p'),
                             load_images_from_dir=True)

    # visualize frame 40, saving the figure instead of displaying it
    save_fig_path = os.path.join(base_dir, 'save_fig_example.png')
    image = analyzer.visualize_example(key=40,
                                       class_names=CLASS_NAMES,
                                       display=False,
                                       save_fig_path=save_fig_path)

    # compare against the reference image, allowing up to 2 gray level difference
    reference_path = os.path.join(
        base_dir, 'data/visualizations/visualization_example.png')
    reference = cv2.imread(reference_path, cv2.IMREAD_UNCHANGED)
    assert np.all(np.isclose(image, reference, atol=2))
def _test_message(self, message: Descriptor):
    """
    Generate tables for a file containing *message* and verify that exactly
    one table is produced whose fields match the message's fields.
    """
    table_resolver = TableResolver()
    analyzer = Analyzer(table_resolver)
    analyzer.generate_tables_for_file(self._create_file_with_message(message))
    result = list(table_resolver.tables)
    # bug fix: `is 1` compares identity, not value (works only by CPython
    # small-int caching and warns on 3.8+); use a proper equality assertion
    self.assertEqual(len(result), 1)
    result_table: Table = result[0]

    # Sort the fields by name.
    def sorted_by_fields_name(fields):
        # bug fix: list.sort() sorts in place and returns None, so the
        # original zip() received (None, None) and raised TypeError;
        # sorted() returns the new sorted list
        return sorted(fields, key=lambda field: field.name)

    for table_field, message_field in zip(sorted_by_fields_name(result_table.fields),
                                          sorted_by_fields_name(message.fields)):
        self._test_field(table_field, message_field)
def test_empty_preds_gts():
    """
    Verify confusion-matrix behavior for frames with empty predictions,
    empty ground truths, and an invalid (None) image shape.
    """
    base_dir = os.path.dirname(__file__)
    relative_data_dir = 'data/ILSVRC2015_00078000'
    data_dir = os.path.join(base_dir, relative_data_dir)

    # load analyzer
    analyzer_file = os.path.join(data_dir, 'analyzer.p')
    analyzer = Analyzer.load(analyzer_file, load_images_from_dir=False)

    # initialize total confusion matrix
    num_classes = len(CLASS_NAMES)
    cm_total = np.zeros((num_classes + 1, num_classes + 1))

    # bug fix: initialize the condition flags so a data set with fewer than
    # four frames fails the asserts below instead of raising NameError
    cond1 = cond2 = cond3 = False

    # iterate over frames
    counter = 0
    for frame_id, item in analyzer.items():
        # unpack data
        prediction, ground_truth, image_path, image = analyzer.unpack_item(
            item)[0:4]

        if counter == 1:
            # delete predictions
            prediction = Box([], image_shape=(100, 200))
        elif counter == 2:
            # delete ground truths
            ground_truth = Box([], image_shape=(100, 200))
        elif counter == 3:
            # delete image shape - Box must reject image_shape=None
            try:
                ground_truth = Box([], image_shape=None)
            except ValueError as e:
                # bug fix: compare the message, not repr(e) - exception repr
                # formatting changed in Python 3.7 (trailing comma removed),
                # so the old repr string only matched on Python <= 3.6
                cond3 = str(e) == ('image_shape must be tuple of length 2 '
                                   '(height, width) or 3 (height, width, '
                                   'channels), got None')

        # calculate confusion matrix of current frame
        cm = ConfusionMatrix.calculate_confusion_matrix(prediction,
                                                        ground_truth,
                                                        CLASS_NAMES,
                                                        normalize=None,
                                                        score_th=0.3,
                                                        iou_th=0.5,
                                                        iou_criterion='all',
                                                        score_criterion='all',
                                                        display=False)

        if counter == 1:
            # no predictions: all 3 ground truths land in the "missed" column
            cond1 = cm[:, -1].sum() == 3
        elif counter == 2:
            # no ground truths: all 3 predictions land in the "false" row
            cond2 = cm[-1, :].sum() == 3

        # advance counter
        counter += 1

    # check conditions
    assert cond1
    assert cond2
    assert cond3
def test_analyzer_mutable_mapping_implementation():
    """
    Analyzer inherits from collections.abc.MutableMapping, which requires
    implementations of __setitem__, __iter__, __delitem__, etc.  Exercise
    those implementations by copying one analyzer into another item by item.
    """
    # load a reference analyzer from saved test data
    source_file = os.path.join(os.path.dirname(__file__),
                               'data/ILSVRC2015_00078000', 'analyzer.p')
    source = Analyzer.load(source_file, load_images_from_dir=False)
    clone = Analyzer()

    # iteration + __setitem__: copy every item into the fresh analyzer
    for frame_id, item in source.items():
        # unpacking must succeed for each stored item
        prediction, ground_truth, image_path, image = source.unpack_item(
            item)[0:4]
        clone[frame_id] = item

    # mirror the scalar attributes so the two instances can compare equal
    for attr in ('image_resize_factor', 'video_processor', 'output_dir',
                 'class_names', 'bbox_match_method', 'iou_th', 'score_th'):
        setattr(clone, attr, getattr(source, attr))

    # the copy must be indistinguishable from the original
    assert compare_instances(source, clone)

    # __delitem__: removing an existing key must not raise
    first_key = list(source.keys())[0]
    del source[first_key]
def test_initialize_with_simple_wrapper():
    """Load a saved analyzer and allocate an empty total confusion matrix."""
    data_dir = os.path.join(os.path.dirname(__file__),
                            'data/ILSVRC2015_00078000')

    # load analyzer (loading itself is the behavior under test)
    analyzer = Analyzer.load(os.path.join(data_dir, 'analyzer.p'),
                             load_images_from_dir=False)

    # (num_classes + 1): extra row/column for unmatched detections
    side = len(CLASS_NAMES) + 1
    cm_total = np.zeros((side, side))
def generate_report():
    """
    Load the saved test analyzer and write a performance report into
    this module's output/generate_report directory.
    """
    # load reference analyzer
    base_dir = os.path.dirname(__file__)
    relative_data_dir = '../tests/data/ILSVRC2015_00078000'
    data_dir = os.path.join(base_dir, relative_data_dir)
    analyzer_file = os.path.join(data_dir, 'analyzer.p')
    analyzer = Analyzer.load(analyzer_file, load_images_from_dir=False)

    # set output directory (created if missing)
    analyzer.output_dir = os.path.join(base_dir, 'output', 'generate_report')
    os.makedirs(analyzer.output_dir, exist_ok=True)

    # generate report (redundant trailing `pass` removed)
    analyzer.evaluate_performance(generate_report=True)
def main():
    """
    Protoc-plugin entry point: read a CodeGeneratorRequest from stdin,
    generate Kotlin Exposed table definitions, and write the
    CodeGeneratorResponse to stdout.
    """
    # Parse the request protoc sends on stdin
    request = plugin.CodeGeneratorRequest()
    request.ParseFromString(sys.stdin.buffer.read())

    response = plugin.CodeGeneratorResponse()

    # TODO: clean this part.
    # Generate a table per message across all proto files in the request
    resolver = TableResolver()
    analyzer = Analyzer(resolver)
    pool = DescriptorPool()
    for proto_file in request.proto_file:
        pool.Add(proto_file)
        analyzer.generate_tables_for_file(
            file_descriptor=pool.FindFileByName(proto_file.name))
    analyzer.link_tables_references()

    # Render the collected tables into the plugin response
    writer = ProtoPluginResponseWriter()
    writer.write(generator=KotlinExposedGenerator(),
                 tables=resolver.tables,
                 plugin_response=response)

    # Serialize the response back to protoc on stdout
    sys.stdout.buffer.write(response.SerializeToString())
# NOTE(review): Python 2 code (print statements, urllib2, dict.iteritems).
# The enclosing `def` of this POST helper starts before the visible portion
# of the file; the statements below are its tail plus the script entry point.
# Structure reconstructed from a whitespace-collapsed source - confirm the
# nesting of sleep(5) against the original file.
response = urllib2.urlopen(req, json.dumps(analyzedDict))
html = response.read()
html = json.loads(html)
print "Success!\n"
print "Result:"
# pretty-print the (possibly nested one level) JSON reply
for key, value in html.iteritems():
    if isinstance(value, dict):
        print str(key)+":"
        for key2, value2 in value.iteritems():
            print " "+str(key2)+": "+str(value2)
    else:
        print str(key)+": "+str(value)

if __name__ == "__main__":
    # watch the device drop directory; analyze and post whenever files appear
    directory = "../device/data/analyze/"
    analyzer = Analyzer(directory)
    while True:
        if filesAvailable(directory):
            print "\nNew files are available.\nWaiting for all data..."
            # crude settle delay so all files finish arriving before analysis
            sleep(2)
            print "Data received.\nStart analyzing..."
            analyzedDict = analyzer.analyze()
            print "Data analyzed.\nStart posting..."
            postData(analyzedDict)
        # poll interval
        sleep(5)
import os

from analyze.analyzer import Analyzer
from analyze.viewer import AnalyzerViewer

if __name__ == '__main__':
    # bug fix: removed duplicate `import os` (already imported at module top)
    import panel as pn
    pn.extension()

    # load analyzer from the saved test data
    base_dir = os.path.dirname(__file__)
    relative_data_dir = '../tests/data/ILSVRC2015_00078000'
    data_dir = os.path.join(base_dir, relative_data_dir)
    analyzer_file = os.path.join(data_dir, 'analyzer.p')
    analyzer = Analyzer.load(analyzer_file, load_images_from_dir=False)

    # image paths stored in the analyzer are relative to the package root
    os.chdir('..')  # go one level up

    # initialize viewer
    viewer = AnalyzerViewer(analyzer, resize_factor=2.)

    # view analyzer (blocks until the panel app is closed)
    viewer.view()

    print('Done!')
def basic_usage_example(): """ Simulate inference loop by using saved analyzer. """ # load reference analyzer analyzer_ref, analyze_root_dir = load_test_analyzer() base_dir = os.path.dirname(__file__) images_path = os.path.abspath( os.path.join(base_dir, '..', 'tests/data/ILSVRC2015_00078000/images')) # create new analyzer output_dir = os.path.join(base_dir, 'output', 'simple_usage_example') analyzer = Analyzer( output_dir=output_dir, output_video_name='video.avi', class_names=analyzer_ref.class_names, bbox_match_method='pred_bbox_center', score_th=0.25, iou_th=0.4, ) # set output images format and directory output_images_dir = os.path.join(output_dir, 'images') os.makedirs(output_images_dir, exist_ok=True) output_image_format = 'jpg' # png pattern_length = 8 output_image_pattern = '%0{}d.{}'.format(pattern_length, output_image_format) images_with_boxes = [] # simulate inference loop by iterating over data saved in analyzer_ref counter = 0 for frame_id, item in analyzer_ref.items(): # inference simulation prediction, ground_truth, image_path, image, _ = analyzer_ref.unpack_item( item) image_path = os.path.join(images_path, os.path.basename(image_path)) # log results in analyzer # IMPORTANT: # in real usage of analyzer you will probably need to to some pre-processing to convert inference output to the # form that analyzer expects. mainly - convert predictions and ground_truth to bounding_box.Box(). 
analyzer.update_analyzer(key=frame_id, prediction=prediction, ground_truth=ground_truth, image_path=image_path, analyze_performance=True) # optional: save visualizations image_out_path = os.path.join(output_images_dir, output_image_pattern % frame_id) image_with_boxes = analyzer.visualize_example( key=frame_id, image=image, show_predictions=True, show_ground_truth=True, class_names=analyzer_ref.class_names, rgb2bgr=False, display=False, save_fig_path=image_out_path, ) # optional: save images with visualizations in a list - later will be used for video creation images_with_boxes.append(image_with_boxes) # optional: periodically save analyzer and video, can omit and save only at inference loop end if np.mod(counter, 20) == 0: analyzer.save(save_thin_instance=False, save_images_in_dir=True) analyzer.update_video(images_with_boxes) counter += 1 # save final analyzer and video analyzer.save(save_thin_instance=True, save_images_in_dir=False) analyzer.update_video(images_with_boxes) # analyze full run - save performance report in output folder analyzer.evaluate_performance(generate_report=True) print('basic_usage_example - done!')
def test_confusion_matrix():
    """
    Accumulate per-frame confusion matrices over all frames of the saved
    test analyzer and compare the total to a known reference.
    """
    base_dir = os.path.dirname(__file__)
    relative_data_dir = 'data/ILSVRC2015_00078000'
    data_dir = os.path.join(base_dir, relative_data_dir)

    # load analyzer
    analyzer_file = os.path.join(data_dir, 'analyzer.p')
    analyzer = Analyzer.load(analyzer_file, load_images_from_dir=False)

    # initialize total confusion matrix; extra row/column for unmatched items
    num_classes = len(CLASS_NAMES)
    cm_total = np.zeros((num_classes + 1, num_classes + 1))

    # iterate over frames
    for frame_id, item in analyzer.items():
        # unpack data
        prediction, ground_truth, image_path, image = analyzer.unpack_item(
            item)[0:4]

        # calculate confusion matrix of current frame
        cm = ConfusionMatrix.calculate_confusion_matrix(prediction,
                                                        ground_truth,
                                                        CLASS_NAMES,
                                                        normalize=None,
                                                        score_th=0.3,
                                                        iou_th=0.5,
                                                        iou_criterion='all',
                                                        score_criterion='all',
                                                        display=False)

        # add current cm to total cm
        cm_total += cm

    # normalize cm (kept for parity with the original test; result unused)
    cm_total_norm = ConfusionMatrix.normalize_confusion_matrix(cm_total,
                                                               norm_type='gt')

    # build the expected matrix sparsely instead of a 32x32 literal of mostly
    # zeros - same values, far less error-prone to read and maintain.
    # row/col num_classes (index -1) is the unmatched (background) bucket.
    cm_total_ref = np.zeros_like(cm_total)
    cm_total_ref[9, 9] = 159.    # class 9: 159 correct detections
    cm_total_ref[9, -1] = 3.     # class 9: 3 missed ground truths
    cm_total_ref[23, 23] = 37.   # class 23: 37 correct detections
    cm_total_ref[23, -1] = 30.   # class 23: 30 missed ground truths
    cm_total_ref[-1, 7] = 11.    # 11 false detections labeled class 7
    cm_total_ref[-1, 9] = 11.    # 11 false detections labeled class 9
    cm_total_ref[-1, 23] = 11.   # 11 false detections labeled class 23

    # unused `is_equal_mat` intermediate removed
    is_equal = cm_total == pytest.approx(cm_total_ref)
    assert is_equal
def main():
    """ Script entry point. """
    ctx = Context()
    ctx.args = parse_arguments()
    ctx.cgroup_driver = detect_cgroup_driver()
    ctx.analyzer = Analyzer(ctx.args.workload_conf_file, ctx.args.thresh_file)
    init_wlset(ctx)
    init_sysmax(ctx)

    if ctx.args.enable_prometheus:
        ctx.prometheus.start()

    if ctx.args.control:
        # CPU-quota controller driven by measured utilization headroom
        ctx.cpuq = CpuQuota(ctx.sysmax_util, ctx.args.margin_ratio,
                            ctx.args.verbose)
        quota_controller = NaiveController(ctx.cpuq, ctx.args.quota_cycles)
        # NOTE(review): "BUGET" looks like a typo for "BUDGET" but it is the
        # project-defined constant name - do not rename here
        ctx.llc = LlcOccup(Resource.BUGET_LEV_MIN, ctx.args.exclusive_cat)
        llc_controller = NaiveController(ctx.llc, ctx.args.llc_cycles)
        if ctx.args.disable_cat:
            # CAT disabled: full LLC budget, and only the CPU controller runs
            ctx.llc = LlcOccup(Resource.BUGET_LEV_FULL, exclusive=False)
            ctx.controllers = {Contention.CPU_CYC: quota_controller}
        else:
            ctx.controllers = {Contention.CPU_CYC: quota_controller,
                               Contention.LLC: llc_controller}

    if ctx.args.record:
        # start the utilization CSV with its header row
        cols = ['time', 'cid', 'name', Metric.UTIL]
        with open(Analyzer.UTIL_FILE, 'w') as utilf:
            utilf.write(','.join(cols) + '\n')

    # utilization monitor always runs; metric monitor is optional below
    threads = [Thread(target=monitor,
                      args=(mon_util_cycle, ctx, ctx.args.util_interval))]

    if ctx.args.collect_metrics:
        if ctx.args.record:
            # start the metrics CSV with its header row
            cols = ['time', 'cid', 'name', Metric.INST, Metric.CYC,
                    Metric.CPI, Metric.L3MPKI, Metric.L3MISS, Metric.NF,
                    Metric.UTIL, Metric.L3OCC, Metric.MBL, Metric.MBR,
                    Metric.L2STALL, Metric.MEMSTALL, Metric.L2SPKI,
                    Metric.MSPKI]
            with open(Analyzer.METRIC_FILE, 'w') as metricf:
                metricf.write(','.join(cols) + '\n')
        threads.append(Thread(target=monitor,
                              args=(mon_metric_cycle, ctx,
                                    ctx.args.metric_interval)))

    for thread in threads:
        thread.start()

    print('eris agent version', __version__, 'is started!')

    try:
        # block until the monitor threads exit
        for thread in threads:
            thread.join()
    except KeyboardInterrupt:
        print('Shutdown eris agent ...exiting')
        ctx.shutdown = True
    except Exception:
        traceback.print_exc(file=sys.stdout)

    sys.exit(0)