def testCache(self):
  """Verifies that a readonly StructTree serves cached values."""
  cached = ['apple', 'antenna', 'alps']
  replacement = ['potatoes', 'tomatoes']
  tree = struct_tree.StructTree(self._data, readonly=True)
  self.assertEqual(tree['strings.a'], cached)
  # Mutate the underlying dict; the readonly tree keeps serving cached data.
  self._data['strings']['a'] = replacement
  self.assertEqual(tree['strings.a'], cached)
  # A freshly constructed tree observes the new data.
  tree = struct_tree.StructTree(self._data, readonly=True)
  self.assertEqual(tree['strings.a'], replacement)
def GetLatest(self, specified_message_enum=None, update_access_time=True):
  """Get the latest messages from each source with the given message type.

  Args:
    specified_message_enum: If not None, restrict the snapshot to this one
        message type; otherwise include every known type.
    update_access_time: If True, record this call as the last access.

  Returns:
    A readonly, fail-silent StructTree keyed by short message type name,
    plus a top-level 'filtered' entry.
  """
  now = self._CurrentTimeSec()
  snapshot = {}
  if specified_message_enum is None:
    type_names = _MESSAGE_TYPE_HELPER.Names()
  else:
    type_names = [_MESSAGE_TYPE_HELPER.Name(specified_message_enum)]
  # Collect timely messages for every requested message type.
  for type_name in type_names:
    short_name = _MESSAGE_TYPE_HELPER.ShortName(type_name)
    type_enum = _MESSAGE_TYPE_HELPER.Value(type_name)
    message_info = self._GetTimelyMessages(
        type_enum, now, self._buffer_time[type_name])
    if message_info:
      snapshot[short_name] = message_info
  # Tack on the filtered data as a top-level key in the message snapshot.
  snapshot['filtered'] = aio_helper.GetFilteredData()
  dict_obj = struct_tree.StructTree(snapshot, fail_silently=True,
                                    readonly=True)
  if update_access_time:
    self._UpdateLastAccessTime()
  return dict_obj
def AutoCheck(log_file, check_list):
  """Run checks for a log file.

  Args:
    log_file: The log file to check.
    check_list: A CheckList object with criteria to check.

  Results are accumulated on the individual check items (retrievable via
  each item's GetResults()); example shape per item:
      results[<message_type>][<aio_node>][<top_attribute>][<sub_field>] = {
          'total': 1000,
          'warnings': {'count': 123, 'range': [48.8, 55.6]},
          'errors': {'count': 123, 'range': [78.8, 91.6]},
      }
  """
  data = struct_tree.StructTree(log_file, True)
  cache = lru_cache.LruCache(50)
  # Evaluate every item in the checklist against the log data.
  for item in check_list.List():
    item.Check(*item.Populate(data, cache))
def BrowseLog(request, path):
  """Browse the log by expanding the field at `path`.

  Args:
    request: An HttpRequest from the client.
    path: A path pointing to one field in the log.

  Returns:
    An HttpResponse serializing a list of names for child fields.
  """
  # The log structure may differ across logs; we always use the first log
  # to construct the log structure.
  log_data = struct_tree.StructTree(
      request.session['log_paths'][0], fail_silently=True, readonly=True)
  try:
    skeleton = log_data.Skeleton(path, depth=1)
  except h5_io.H5IndexError:
    return http.HttpResponse('{}')
  d3_data = struct_tree.DictToD3Tree(skeleton, '.', path)
  if 'children' not in d3_data:
    return http.HttpResponse('{}')
  # The first layer is a placeholder; serialize from the second layer down.
  return http.HttpResponse(json.dumps(d3_data['children']))
def testGradebookOnAio(self):
  """Checks gradebook evaluation against synthesized AIO messages."""
  book = self._GetValidSampleGradebook()
  messages = struct_tree.StructTree(
      {'MotorStatus': {'MotorPti': {'temps': [30, 40, 50, 60]}}}, True)
  checks = gradebook_base_check.GradebookChecks()
  checks.Initialize(book, for_log=False, use_full_name=False)
  # Expected results per checked field; fields absent from this map must
  # produce no results at all.
  expected_by_index = {
      'MotorStatus.MotorPti.temps[0]': [
          {'name': 'Value', 'value': 60, 'stoplight': 3}],
      'MotorStatus.MotorPti.temps[1]': [
          {'name': 'Value', 'value': 80, 'stoplight': 2}],
  }
  for item in checks.List():
    item.Check(*item.Populate(messages))
    expected = expected_by_index.get(item.FieldIndex())
    if expected is not None:
      self.assertEqual(item.GetResults(), expected)
    else:
      self.assertFalse(item.GetResults())
def testIndicesToOrderAndDedup(self):
  """Checks ordering and dedup index helpers on a wrapping sequence."""
  seq = numpy.array([1, 10, 10, 2, 2, 3, 4, 5, 4, 3, 7, 7, 7, 8, 8, 1])
  template = string.Template('$message_type.$aio_node')
  data = {'a': {'b': {'aio_header': {'sequence': seq}}}}
  ordered = log_util.MessageOrderedIndices(
      struct_tree.StructTree(data), 'a', 'b', template, wraparound=10)
  expected_indices = numpy.array([1, 0, 3, 5, 6, 7, 10, 13, 15])
  expected_values = numpy.array([10, 1, 2, 3, 4, 5, 7, 8, 1])
  self.assertTrue(numpy.array_equal(ordered, expected_indices))
  self.assertTrue(numpy.array_equal(seq[ordered], expected_values))
  expected_mask = numpy.array(
      [1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1])
  mask = log_util.MessageDedupSelection(
      struct_tree.StructTree(data), 'a', 'b', template, wraparound=10)
  self.assertTrue(numpy.array_equal(mask, expected_mask))
def CompareLogData(log_paths, field_labels):
  """Get series of data, each corresponding to field values within a log.

  Args:
    log_paths: Paths to the log files to compare.
    field_labels: Dict mapping field paths to legend labels.

  Returns:
    A dict mapping '<log>.<label>' to {'x': timestamps, 'y': values}, with
    timestamps rebased to the earliest one seen across all series.
  """
  series = {}
  earliest = float('inf')
  for log_path in log_paths:
    tree = struct_tree.StructTree(log_path, fail_silently=True,
                                  readonly=True)
    log_name = os.path.basename(log_path)
    if '.' in log_name:
      # Strip the file extension from the series name.
      log_name = log_name[:log_name.rfind('.')]
    for field, legend_label in field_labels.iteritems():
      values, timestamps = log_util.GetOrderedDedupDataAndTimeByField(
          tree, field, rebase=True)
      if values is None or timestamps is None:
        continue
      values, _ = _DownSample(values, settings.MAX_DATA_POINTS_PER_LOG_FIELD)
      timestamps, downsample_rate = _DownSample(
          timestamps, settings.MAX_DATA_POINTS_PER_LOG_FIELD)
      # Track the earliest timestamp across all logs and fields.
      earliest = min(earliest, float(timestamps[0]))
      series_name = '%s.%s' % (log_name, legend_label)
      if downsample_rate > 1:
        series_name += '(/%d)' % downsample_rate
      series[series_name] = {'x': timestamps, 'y': values.tolist()}
  # Rebase every series onto the common timeline.
  for series_name in series:
    series[series_name]['x'] = (
        series[series_name]['x'] - earliest).tolist()
  return series
def ConcatenateLogData(log_paths, field_labels):
  """Get series of data, each corresponding to field values in all logs.

  Args:
    log_paths: Paths to the log files whose data is concatenated per field.
    field_labels: Dict mapping field paths to legend labels.

  Returns:
    A dict mapping legend labels (suffixed with '(/<rate>)' when
    downsampled) to {'x': timestamps, 'y': values} lists, with timestamps
    rebased to the earliest timestamp observed.
  """
  series = {}
  base_timeline = float('inf')
  for log_path in log_paths:
    log_data = struct_tree.StructTree(log_path, fail_silently=True,
                                      readonly=True)
    for field, legend_label in field_labels.iteritems():
      data, timestamps = log_util.GetOrderedDedupDataAndTimeByField(
          log_data, field, rebase=False)
      if data is None or timestamps is None:
        continue
      base_timeline = min(base_timeline, float(timestamps[0]))
      if legend_label not in series:
        series[legend_label] = {'x': timestamps, 'y': data}
      else:
        series[legend_label]['x'] = numpy.concatenate(
            (series[legend_label]['x'], timestamps))
        series[legend_label]['y'] = numpy.concatenate(
            (series[legend_label]['y'], data))
  result = {}
  for _, legend_label in field_labels.iteritems():
    # A field that was missing from every log never entered `series`;
    # skip it instead of raising a KeyError below.
    if legend_label not in series:
      continue
    timestamps, _ = _DownSample(series[legend_label]['x'],
                                settings.MAX_DATA_POINTS_PER_LOG_FIELD)
    data, downsample_rate = _DownSample(
        series[legend_label]['y'], settings.MAX_DATA_POINTS_PER_LOG_FIELD)
    if downsample_rate > 1:
      legend_label += '(/%d)' % downsample_rate
    result[legend_label] = {
        'x': (timestamps - base_timeline).tolist(),
        'y': data.tolist()
    }
  return result
def testSubtree(self):
  """Checks Subtree() on dict-backed and HDF5-backed trees."""
  expected_c = ['car', 'cat', 'circuit']
  self.assertEqual(
      self._struct_tree.Subtree('strings.c').Data(
          convert_to_basic_types=True),
      expected_c)
  self.assertEqual(self._struct_tree.Subtree('deep.x')['y.z'], 0)
  self._struct_tree = struct_tree.StructTree(self._data)
  with tempfile.NamedTemporaryFile() as temp_hdf5:
    data = copy.copy(self._data)
    # Lists of dicts are not valid HDF5 objects. Remove them.
    del data['numbers']
    h5_io.H5Dump(temp_hdf5.name, data)
    h5_tree = struct_tree.StructTree(temp_hdf5.name)
    self.assertEqual(
        h5_tree.Subtree('strings.c').Data(convert_to_basic_types=True),
        expected_c)
    # Dataset[()] is h5py's way to get the value of a scalar dataset.
    self.assertEqual(h5_tree.Subtree('deep.x')['y.z'][()], 0)
def testGradebookOnLog(self):
  """Checks gradebook warning/error aggregation over an HDF5 log."""
  book = self._GetValidSampleGradebook()
  # Four messages, each with a 2-element `temps` array and a sequence
  # number; sequence numbers are deliberately out of order (1, 3, 2, 4)
  # to exercise reordering.
  message = numpy.array(
      [(((30., 31.),), (1,)),
       (((50., 51.),), (3,)),
       (((40., 41.),), (2,)),
       (((60., 61.),), (4,))],
      dtype=[
          ('message', [('temps', 'f', (2,))]),
          ('aio_header', [('sequence', '>u2'),])
      ]
  )
  dataset = {
      'messages': {
          'kAioNodeMotorPti': {
              'kMessageTypeMotorStatus': message
          }
      }
  }
  with tempfile.NamedTemporaryFile() as temp_hdf5:
    h5_io.H5Dump(temp_hdf5.name, dataset)
    checks = gradebook_base_check.GradebookChecks()
    checks.Initialize(book, for_log=True, use_full_name=True)
    for item in checks.List():
      item.Check(*item.Populate(struct_tree.StructTree(temp_hdf5.name)))
      # After ordering the sequence and applying the callback, temps[0]
      # becomes [60.0, 80.0, 100.0, 120.0] and temps[1] becomes
      # [62.0, 82.0, 102.0, 122.0].
      if item.FieldIndex() == 'MotorStatus.MotorPti.temps[0]':
        self.assertEqual(item.GetResults(), {
            'MotorPti.Board Temperature (Value)': {
                'warning': {'count': 3, 'range': [80.0, 120.0], 'total': 4,
                            'sections': [(1, 4)],
                            'expecting': '[[60, 70], 0]'}
            }
        })
        self.assertTrue(item.HasWarnings())
        self.assertFalse(item.HasErrors())
      elif item.FieldIndex() == 'MotorStatus.MotorPti.temps[1]':
        self.assertEqual(item.GetResults(), {
            'MotorPti.temps[1] (Value)': {
                'warning': {'count': 1, 'range': [82.0, 82.0], 'total': 4,
                            'sections': [(1, 2)],
                            'expecting': '[[60, 70], 0]'},
                'error': {'count': 2, 'range': [102.0, 122.0], 'total': 4,
                          'sections': [(2, 4)],
                          'expecting': '[[60, 90]]'}
            }
        })
        self.assertTrue(item.HasWarnings())
        self.assertTrue(item.HasErrors())
      else:
        # Any other checked field must report nothing at all.
        self.assertFalse(item.GetResults())
        self.assertFalse(item.HasWarnings())
        self.assertFalse(item.HasErrors())
def _SynthesizeTetherTelemetry(self):
  """Builds a StructTree whose merged tether-down data is marked valid.

  Returns:
    A (comms_status, comms_status_valid, messages) tuple where `messages`
    is a writable StructTree containing the filtered data.
  """
  filtered = aio_helper.GetFilteredData()
  filtered.merge_tether_down.valid = True
  merged = filtered.merge_tether_down
  messages = struct_tree.StructTree({'filtered': filtered}, readonly=False)
  return merged.comms_status, merged.comms_status_valid, messages
def setUp(self):
  """Builds dict-, numpy-, and ctype-backed StructTrees for the tests."""
  self.array = np.array([(0.0, 20), (1.0, 40), (2.0, 80), (3.0, 160)],
                        dtype=[('x', '<f8'), ('y', '<i8')])
  word_groups = [
      ['apple', 'antenna', 'alps'],
      ['banana', 'bus', 'bound'],
      ['car', 'cat', 'circuit'],
      ['dance', 'deer', 'direction'],
  ]
  self._data = {
      # A list of single-entry dicts keyed 1..4.
      'numbers': [{i + 1: words} for i, words in enumerate(word_groups)],
      # The same word lists keyed 'a'..'d'.
      'strings': dict(zip('abcd', word_groups)),
      'deep': {'x': {'y': {'z': 0}}},
      'numpy': self.array,
  }
  self._struct_tree = struct_tree.StructTree(self._data)
  self._ndarray = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
  self._numpy_tree = struct_tree.StructTree({'array': self._ndarray})
  self._ctype_tree = test_util.SynthesizeMessages('MotorStatus', 0)
  self.assertIn('strings.a', self._struct_tree)
  self.assertNotIn('strings.z', self._struct_tree)
  self.assertNotIn('shallow.a', self._struct_tree)
def _GetMessage(unused_request, client_id, message_type=None):
  """Get a message from the receiver.

  Returns an empty readonly StructTree when no receiver exists for the
  client.
  """
  receiver = receiver_manager.ReceiverManager.GetReceiver(client_id)
  if not receiver:
    return struct_tree.StructTree({}, fail_silently=True, readonly=True)
  if message_type is not None:
    message_enum = MESSAGE_TYPE_HELPER.Value(message_type)
  else:
    message_enum = None
  return receiver.GetLatest(message_enum)
def testFlapsIndicatorSparseCommsCoverage(self):
  """All servos reporting fresh angles should yield a normal stoplight."""
  filtered = aio_helper.GetFilteredData()
  filtered.merge_tether_down.valid = True
  statuses = filtered.merge_tether_down.tether_down.servo_statuses
  for i in range(aio_labels.kNumServos):
    statuses[i].no_update_count = 0
    statuses[i].angle = float(i)
  indicator = control.FlapsIndicator(common.SPARSE_COMMS_MODE)
  messages = struct_tree.StructTree({'filtered': filtered})
  self.assertEqual(stoplights.STOPLIGHT_NORMAL,
                   indicator.Filter(messages)[1])
def _GetSynthesizedControlTelemetry(self):
  """Synthesizes control telemetry with valid merged tether-down data.

  Returns:
    A (control_telemetry, tether_control_telemetry, messages) tuple where
    `messages` is a writable StructTree over the synthesized data.
  """
  data = self._SynthesizeControlTelemetry().Data(
      convert_to_basic_types=False)
  control_telemetry = data['ControlTelemetry']['ControllerA']
  filtered = aio_helper.GetFilteredData()
  filtered.merge_tether_down.valid = True
  tether_control_telemetry = (
      filtered.merge_tether_down.tether_down.control_telemetry)
  tether_control_telemetry.no_update_count = 0
  data['filtered'] = filtered
  return (control_telemetry, tether_control_telemetry,
          struct_tree.StructTree(data, readonly=False))
def testH5Skeleton(self):
  """Checks Skeleton() and ListSkeleton() on an HDF5-backed StructTree."""
  hdf5_tree = struct_tree.StructTree(self.tempdata_fp.name)
  # Skeleton of the compound 'table' dataset: field name -> shape.
  table_skeleton = {
      'instrument': (2,),
      'filter': (2,),
      'response': {
          'linenumber': (2, 2),
          'wavelength': (2, 2),
          'throughput': (2, 2),
      }
  }
  self.assertEqual(hdf5_tree.Skeleton(['numbers', 'table']), table_skeleton)
  # ListSkeleton flattens nested paths with '.' separators; compared as
  # sets (order-insensitive).
  flattened_table_skeleton = [
      ('instrument', (2,)),
      ('filter', (2,)),
      ('response.linenumber', (2, 2)),
      ('response.wavelength', (2, 2)),
      ('response.throughput', (2, 2)),
  ]
  self.assertEqual(set(hdf5_tree.ListSkeleton(['numbers', 'table'])),
                   set(flattened_table_skeleton))
  expected_skeleton = {
      'dataset': (3,),
      'empty_group': None,  # An empty group's skeleton is None.
      'numbers': {
          'integers': (3,),
          'floats': (3,),
          'nans': None,
          'table': table_skeleton,
      }
  }
  self.assertEqual(hdf5_tree.Skeleton(), expected_skeleton)
  expected_flattened_skeleton = [
      ('dataset', (3,)),
      ('empty_group', None),  # An empty group's skeleton is None.
      ('numbers.integers', (3,)),
      ('numbers.floats', (3,)),
      ('numbers.nans', None),
  ] + [('numbers.table.%s' % k, v) for k, v in flattened_table_skeleton]
  self.assertEqual(set(hdf5_tree.ListSkeleton()),
                   set(expected_flattened_skeleton))
  # With depth=2, 'table' still appears but its contents are truncated.
  expected_skeleton['numbers']['table'] = {}
  self.assertEqual(hdf5_tree.Skeleton(depth=2), expected_skeleton)
def PeriodicDataPoll(request, client_id, layout_name):
  """Compute realtime data and respond to periodic polling from a client layout.

  Args:
    request: An HttpRequest from the client.
    client_id: The ID of the client's browser tab.
    layout_name: Name of the layout associated with the client.

  Returns:
    An HttpResponse in the format of a serialized JSON object.
  """
  message_snapshot = _GetMessage(request, client_id)
  if not message_snapshot:
    message_snapshot = struct_tree.StructTree({}, fail_silently=True,
                                              readonly=True)
  layout = loader.LayoutLoader().GetLayoutByModuleName(layout_name)
  tab_memory = layout_memory.GetMemory(client_id, False)
  if tab_memory is not None:
    # Load the persistent memory.
    layout.Import(tab_memory)
  else:
    layout.Initialize()
    tab_memory = layout_memory.GetMemory(client_id, True)
  # Start the AIO receiver in case the server has restarted.
  _TryToEnforceAioReceiver(client_id)
  try:
    data = layout.Filter(message_snapshot)
  except Exception:  # pylint: disable=broad-except
    # layout.Filter may introduce any kind of exception.
    logging.error('PeriodicDataPoll encountered an error:\n%s',
                  debug_util.FormatTraceback())
    layout.Export(tab_memory)
    return http.HttpResponse('{}')
  # Save the persistent memory.
  layout.Export(tab_memory)
  resp = data.Json()
  if settings.DEBUG:
    resp['__message__'] = '\n-----------------------------\n'.join(
        'Error in indicator "%s":\n%s' % (k, v)
        for k, v in layout.ErrorReport())
  resp_str = json.dumps(resp)
  layout.ClearErrors()
  return http.HttpResponse(resp_str)
def __init__(self, minimum_stale_timeout_seconds, stale_timeout_in_periods,
             receiver_idle_timeout_seconds, log_path, message_type, source,
             network_yaml_file, aio_message_sequence_bits):
  """Initializes a receiver that replays messages from an HDF5 log file."""
  super(LogReceiver, self).__init__(
      minimum_stale_timeout_seconds, stale_timeout_in_periods,
      receiver_idle_timeout_seconds, [message_type], network_yaml_file,
      aio_message_sequence_bits)
  tree = struct_tree.StructTree(log_path, fail_silently=True, readonly=True)
  self._messages = tree.Index(('messages', source, message_type))
  # Replay position and total number of messages in the log.
  self._index = 0
  self._bound = self._messages['aio_header']['source'].size
  self._received_message_enums = set()
  # A dictionary of recently received messages:
  # {<message_type>: {<aio_node>: message}}
  self._message_snapshot = collections.defaultdict(
      lambda: collections.defaultdict(lambda: None))
  # The lock guards all read/write accesses to _message_snapshot.
  self._value_lock = threading.Lock()
def SynthesizeMessages(message_names=None, sequence=0, fail_silently=False):
  """Synthesize fake AIO messages.

  Args:
    message_names: The set of message type names. If None, all messages
        are synthesized.
    sequence: The sequence number of all the messages.
    fail_silently: If True, operations over the returned StructTree return
        None rather than raising exceptions.

  Returns:
    A StructTree object as a nested dict of messages, indexed first by
    message types and then AIO nodes.
  """
  # TODO: Clean-up nonstandard messages.
  excluded = ['Stdio', 'ControlDebug']
  messages = {}
  all_types = network_config.NetworkConfig(
      settings.NETWORK_YAML).all_messages
  for message_type in all_types:
    name = message_type.name
    if name not in _MESSAGE_TYPE_HELPER or name in excluded:
      continue
    if message_names and name not in message_names:
      continue
    msg_enum = _MESSAGE_TYPE_HELPER.Value(name)
    messages[name] = {}
    # Synthesize one message per sender of this type.
    for sender in message_type.all_senders:
      aio_node = sender.camel_name
      fake = FakeAioMessage(msg_enum, aio_node, sequence)
      if fake.IsValid():
        messages[name][aio_node] = fake.Get(readonly=True)
  messages['filtered'] = SynthesizeFilteredData()
  return struct_tree.StructTree(messages, fail_silently=fail_silently,
                                readonly=False)
def GetRawLogData(request, fields):
  """Get values of data fields within a log file.

  Args:
    request: An HttpRequest whose session holds 'log_paths'.
    fields: Newline-separated field paths to extract from each log.

  Returns:
    An HttpResponse serving a JSON attachment of per-log field data.
  """
  log_paths = request.session['log_paths']
  fields = [f.strip() for f in fields.split('\n') if f.strip()]
  field_labels = layout_util.GetDistinguishableNames(
      fields, '.', ['kAioNode', 'kMessageType'])
  result = {}
  # Remove special characters so variables can be parsed and loaded into
  # Matlab.
  bad_chars = [
      '.', ',', '-', '+', '(', ')', '[', ']', '{', '}', ':', 'kMessageType',
      'kAioNode', 'messages', 'message'
  ]
  replacement = list(zip(bad_chars, ['_'] * len(bad_chars)))
  replacement = [('[:]', ''), (':,', ''), (' ', '')] + replacement
  for log_path in log_paths:
    base_name = os.path.basename(log_path)
    # str.partition keeps the whole name when there is no extension;
    # base_name[:base_name.find('.')] would wrongly drop the last character
    # because find() returns -1.
    log_name = 'log_' + _StringReplace(base_name.partition('.')[0],
                                       replacement)
    log_data = struct_tree.StructTree(log_path, fail_silently=True,
                                      readonly=True)
    result[log_name] = {}
    for field, legend_label in field_labels.iteritems():
      data, timestamps = log_util.GetOrderedDedupDataAndTimeByField(
          log_data, field, rebase=False)
      result[log_name][_StringReplace(legend_label, replacement)] = {
          'values': data.tolist() if data is not None else None,
          'timestamps':
              timestamps.tolist() if timestamps is not None else None,
          'status': 'success' if data is not None else 'missing',
      }
  response = http.HttpResponse(content_type='text/plain')
  response['Content-Disposition'] = (
      'attachment; filename=makani_log_data.json')
  response.write(json.dumps(result, indent=2))
  return response
def testH5Extract(self):
  """Checks indexed extraction from an HDF5-backed StructTree."""
  hdf5_tree = struct_tree.StructTree(self.tempdata_fp.name)
  self.assertEqual(3.4, hdf5_tree.Index('numbers.floats[1]'))
  self.assertTrue(np.array_equal([1, 2, 3], hdf5_tree.Index('dataset[:]')))
  # A single cell of a compound dataset is a dict of scalars.
  self.assertEqual(
      {
          'wavelength': np.float32(2e6),
          'throughput': np.float32(1.6),
          'linenumber': np.float32(1),
      },
      hdf5_tree.Index('numbers.table.response[:][0, 1]'))
  # A whole row is a dict of arrays; compare field by field.
  expected = {
      'wavelength': np.array([1.89e6, 2e6]),
      'throughput': np.array([0., 1.6]),
      'linenumber': np.array([0, 1]),
  }
  row = hdf5_tree.Index('numbers.table.response[:][0]')
  self.assertEqual(set(row.keys()), set(expected.keys()))
  for key in row:
    np.testing.assert_array_almost_equal(row[key], expected[key])
def _AddIndicators(self, view_name, indicators, properties=None): """"Append indicators to a view. This function is called when a layout is being initialized. Args: view_name: Name of the view that includes the indicators. indicators: The list of indicators to add. properties: A dict of display properties to be updated. """ # Add indicators. _, indicator_list, view_properties = self._GetView(view_name) view_index = self._view_index[view_name] num_indicators = len(indicator_list) for i, indicator in enumerate(indicators): # Call Filter() with empty data to get the number of output variables. empty_message_snapshot = {} empty_message_snapshot['filtered'] = aio_helper.GetFilteredData() outputs = indicator.Filter( struct_tree.StructTree(empty_message_snapshot, fail_silently=True, readonly=True)) output_count = len(outputs) if isinstance(outputs, tuple) else 1 # Assign a unique name, labeled by view_index, indicator_index (i), # and variable index (n). This name is then used by Plot() to populate # the widgets, and later by Filter() to assign computed values to # widget inputs. var_names = [ 'var_%d_%d_%d' % (view_index, i + num_indicators, n) for n in range(output_count) ] indicator_list.append((indicator, var_names)) if properties is not None: view_properties.update(properties)
def _Run(self, monitor_params, messages, expected):
  """Filters `messages` through the indicator and checks the plot data."""
  tree = struct_tree.StructTree(messages, readonly=False)
  indicator = self._CreateIndicator(monitor_params)
  self.assertEqual(indicator.Filter(tree), tuple(expected))
def testEmpty(self):
  """Non-empty trees are truthy; an empty tree is falsy."""
  for tree in (self._ctype_tree, self._numpy_tree, self._struct_tree):
    self.assertTrue(tree)
  self.assertFalse(struct_tree.StructTree({}))
def ViewLogStructure(request, paths, template_name='log_structure.html'):
  """View structure of an HDF5 log at given log path.

  Args:
    request: An HttpRequest from the client.
    paths: Paths to the local log files.
    template_name: The HTML template used to render the layout.

  Returns:
    An HttpResponse that renders the log structure.
  """
  # `context` includes variables used to render the HTML.
  context = {
      'graph_width': 6000,
      'graph_height': 6000,
      'frame_width': 200,
      'frame_height': 540,
      'canvas_cols': 12,
  }
  log_paths = []
  for path in paths.split(';'):
    path = path.strip()
    if not path:
      continue
    # Expand environment variables (string.Template '$VAR' syntax) in the
    # path.
    path_template = string.Template(path)
    log_path = path_template.substitute(os.environ)
    basename = os.path.basename(log_path)
    if basename.startswith('(') and basename.endswith(')'):
      # A parenthesized basename is a regex matched against the files in
      # the directory.
      dirname = os.path.dirname(log_path)
      regex_pattern = re.compile(basename[1:-1] + '$')
      filenames = os.listdir(dirname)
      matched_files = [f for f in filenames if regex_pattern.match(f)]
      log_paths += [os.path.join(dirname, f) for f in matched_files]
    else:
      log_paths.append(log_path)
  if not log_paths:
    context['errors'] = 'Cannot find log data'
  else:
    # Use the first log to index fields.
    log_data = struct_tree.StructTree(log_paths[0], fail_silently=True,
                                      readonly=True)
    log_skeleton = log_data.Skeleton(depth=1)
    d3_data = struct_tree.DictToD3Tree(log_skeleton, '/')
    d3_data['expand_url'] = urlresolvers.reverse('browse_log', args=[''])
    request.session['log_paths'] = log_paths
    context['skeleton'] = json.dumps(d3_data)
  order_horizontally = True
  configs = _LoadConfigs()
  scenarios = layout_base.AssembleLayout(
      [
          ('Signals', [
              widgets.DictLinesWidget(
                  'series', None, interactive=True, use_markers=True),
          ]),
      ],
      desired_view_cols=1, order_horizontally=order_horizontally)
  layout_names = loader.LayoutLoader().ModuleNames()
  layout_names.sort()
  configs['scenarios'] = scenarios
  context.update(_PrepareContext(configs))
  context['layout_names'] = layout_names
  context['content_width'] = settings.CSS_GRID_COLUMNS - 2
  context['order_horizontally'] = order_horizontally
  _CreateAndAddClientIdToContext(context)
  return shortcuts.render(request, template_name, context,
                          context_instance=template.RequestContext(request))
def testBasics(self):
  """Generated scenario should contain one indicator per leaf field."""
  messages = struct_tree.StructTree(
      {
          'MotorStatus': {
              'MotorPbi': {
                  'status': 0,
                  'errors': [1, 2, 3],
                  'details': {
                      'temp': 60,
                      'voltage': 800
                  },
              }
          }
      },
      fail_silently=False, readonly=True)
  scenario = autogen.GenerateScenario(messages.Data(), 'Test')
  # Scalar leaves become ScalarIndicators; the `errors` array becomes a
  # ListIndicator with one key per element.
  self.assertEqual(
      scenario,
      {
          'signals': {},
          'canvas': {
              'row_height_px': 40,
              'grid_width': 12
          },
          'views': [{
              'stripe': [{
                  'indicators': [{
                      'src': 'MotorStatus.MotorPbi.details.temp',
                      'name': 'details.temp',
                      'cols': 12,
                      'precision': None,
                      'indicator_src': None,
                      'template': 'ScalarIndicator',
                      'font_size': None,
                      'mode': 'horizontal'
                  }, {
                      'src': 'MotorStatus.MotorPbi.details.voltage',
                      'name': 'details.voltage',
                      'cols': 12,
                      'precision': None,
                      'indicator_src': None,
                      'template': 'ScalarIndicator',
                      'font_size': None,
                      'mode': 'horizontal'
                  }, {
                      'message_src': None,
                      'src': 'MotorStatus.MotorPbi.errors',
                      'mode': 'horizontal',
                      'template': 'ListIndicator',
                      'indicator_src': None,
                      'keys': ['[0]', '[1]', '[2]'],
                      'precision': None,
                      'cols': 12,
                      'font_size': None,
                      'name': 'errors'
                  }, {
                      'src': 'MotorStatus.MotorPbi.status',
                      'name': 'status',
                      'cols': 12,
                      'precision': None,
                      'indicator_src': None,
                      'template': 'ScalarIndicator',
                      'font_size': None,
                      'mode': 'horizontal'
                  }],
                  'rows': 3,
                  'name': 'MotorStatus.MotorPbi',
                  'grid_width': 12
              }],
              'grid_width': 12
          }],
          'filters': [],
          'title': 'Test'
      })
# Run the checks either against logs in cloud storage or on local disk.
checks.SetMinGap(FLAGS.min_gap)
if FLAGS.cloud_path:
  results = autocheck.RunFromCloud(FLAGS.cloud_path, FLAGS.input_prefix,
                                   checks, FLAGS.gradebook, FLAGS.verbose)
else:
  results = autocheck.RunFromLocal(FLAGS.directory, FLAGS.input_prefix,
                                   checks, FLAGS.gradebook, FLAGS.verbose)
aggregated_results = None
if FLAGS.verbose:
  # Dump the aggregated results to stdout for inspection.
  print 'Concatenate errors in all files --------------------------------'
  aggregated_results = struct_tree.StructTree(
      autocheck.GatherResultsFromMultipleFiles(results), True)
  print json.dumps(aggregated_results.Data(), indent=2,
                   cls=json_util.JsonNumpyEncoder, sort_keys=True)
if FLAGS.output_file:
  # Reuse the aggregation from the verbose branch when available.
  if aggregated_results is None:
    aggregated_results = struct_tree.StructTree(
        autocheck.GatherResultsFromMultipleFiles(results), True)
  with open(FLAGS.output_file, 'w') as fp:
    json.dump(aggregated_results.Data(), fp, indent=2,
              cls=json_util.JsonNumpyEncoder, sort_keys=True)
def _CountFlightTime(log_file, omega_range, num_omega_bins, wind_speed_range,
                     num_wind_speed_bins, unit):
  """Compute statistics about motor spins in logs.

  Duration is computed by counting the number of messages and divide it by
  the message frequency.

  Args:
    log_file: The log file to process.
    omega_range: The range of motor speeds: [lower bound, upper bound].
    num_omega_bins: The number of histogram bins for motor speeds.
    wind_speed_range: The range of wind speeds: [lower bound, upper bound].
    num_wind_speed_bins: The number of wind speed bins to characterize.
    unit: Unit of time, one of "sec", "min", and "hour".

  Returns:
    A Python dict in the form of:
    {
        'omega_config': {  # Bounds and bins for motor speed histograms.
            'min': min_omega,
            'max': max_omega,
            'num_bins': num_omega_bins},
        'wind_config': {  # Bounds and bins for wind speed breakdowns.
            'min': min_wind_speed,
            'max': max_wind_speed,
            'num_bins': num_wind_speed_bins},
        'bin_edges': Bin edges for the motor speed histogram,
        'category': The flight category,
        <motor_name>: {  # e.g., 'MotorSbo'
            'histogram': Time spent in each motor speed range,
            'omega_cdf': Time pent above certain motor speed,
            'wind_based_histogram': {
                wind speed [m/s]: Time spent in each motor speed range},
        }
    }
  """
  data = struct_tree.StructTree(log_file, True)
  message_template = string.Template(
      'messages.kAioNode$aio_node.kMessageType$message_type')
  message_name = 'MotorStatus'
  motor_names = MOTOR_LABELS_HELPER.ShortNames()
  control_telemetry_messages = ['ControlDebug', 'ControlTelemetry']
  control_telemetry_node = 'ControllerA'
  results = {
      'category': _FlightCategory(data),
      'omega_config': {
          'min': omega_range[0],
          'max': omega_range[1],
          'num_bins': num_omega_bins,
      },
      'wind_config': {
          'min': wind_speed_range[0],
          'max': wind_speed_range[1],
          'num_bins': num_wind_speed_bins
      },
  }
  if 'messages' not in data:
    logging.error('Unable to process log "%s"', log_file)
    return results
  wind_speed = None
  # Try ControlDebug first, then ControlTelemetry, for wind speed data.
  for control_telemetry_message in control_telemetry_messages:
    control_telemetry_path = message_template.substitute({
        'message_type': control_telemetry_message,
        'aio_node': control_telemetry_node,
    })
    wind_speed = data['%s.message.state_est.wind_g.speed_f'
                      % control_telemetry_path]
    if wind_speed is not None:
      wind_timestamp = log_util.LogTimestamp(data, control_telemetry_message,
                                             control_telemetry_node)
      break
  if wind_speed is not None:
    results['wind'] = {}
    results['wind']['avg_speed'] = numpy.average(wind_speed)
    results['wind']['max_speed'] = numpy.max(wind_speed)
    results['wind']['count'] = wind_speed.size
    wind_speed_bin_edges = numpy.linspace(
        wind_speed_range[0], wind_speed_range[1], num_wind_speed_bins)
  assert unit in ['sec', 'min', 'hour']
  # Scale factor converting seconds to the requested time unit.
  if unit == 'min':
    frequency_scale = 60.0
  elif unit == 'hour':
    frequency_scale = 3600.0
  else:
    frequency_scale = 1.0
  parameters = {'message_type': message_name}
  for motor_code in motor_names:
    motor_name = 'Motor' + motor_code
    # Get the motor status and omega.
    parameters['aio_node'] = motor_name
    path = message_template.substitute(parameters)
    status = data['%s.message.%s' % (path, 'motor_status')]
    omega = data['%s.message.%s' % (path, 'omega')]
    timestamp = data['%s.capture_header.tv_sec' % path]
    if status is None:
      continue
    omega = numpy.abs(omega)
    # Select the subsequence to include for computation: deduplicated
    # messages from motors in the Running state.
    clean_selection = log_util.MessageDedupSelection(
        data, 'MotorStatus', motor_name, message_template)
    clean_selection &= status == MOTOR_STATUS_HELPER.Value('Running')
    # Estimate timestep as the average spacing between messages, in the
    # requested unit.
    timestep = (float(timestamp[-1] - timestamp[0]) /
                timestamp.size / frequency_scale)
    # Compute the motor speed histogram in general.
    hist, _ = numpy.histogram(
        omega[clean_selection], bins=num_omega_bins, range=omega_range,
        density=False)
    # Convert message counts to duration.
    omega_hist = hist * float(timestep)
    results[motor_name] = {
        'histogram': omega_hist,
        # Reverse cumulative sum: time spent at or above each speed bin.
        'omega_cdf': numpy.cumsum(omega_hist[::-1])[::-1],
        'wind_based_histogram': {},
    }
    # Compute the wind-profile based motor speed histogram.
    if wind_speed is not None:
      motor_timestamp = log_util.LogTimestamp(data, message_name, motor_name)
      # Interpolate wind speed onto the motor message timestamps.
      aligned_wind_speed = numpy.interp(motor_timestamp, wind_timestamp,
                                        wind_speed)
      for n in range(len(wind_speed_bin_edges) - 1):
        low_bar, high_bar = wind_speed_bin_edges[n:n+2]
        wind_selection = (clean_selection &
                          (aligned_wind_speed >= low_bar) &
                          (aligned_wind_speed < high_bar))
        hist, _ = numpy.histogram(omega[wind_selection], bins=num_omega_bins,
                                  range=omega_range, density=False)
        # Key the per-wind histogram by the bin's center speed.
        results[motor_name]['wind_based_histogram'][
            (low_bar + high_bar) / 2.0] = hist * float(timestep)
  return results
def _ProcessSimOutput(self, unused_config_id, sim_success, log_file_name):
  """Processes the simulator output.

  Args:
    unused_config_id: ID of the parameter file used.
    sim_success: Whether the sim was successful.
    log_file_name: Name of the log file to be parsed.

  Returns:
    See parent class.
  """
  # TODO: Improve the following attempt to capture simulator
  # output such that it only captures the simulator output (i.e. via
  # some use of "tee"), works when run locally, etc.
  # TODO: Figure out a way to get rid of duplicated log entries, or
  # move the log capture to the batch_sim worker base class.
  sim_error_message = ''
  if not sim_success:
    # On failure, include the tail of syslog as the error message.
    sim_error_message = batch_sim_util.Tail('/var/log/syslog', 150)
  full_output = {
      'sim_success': sim_success,
      'sim_error_message': sim_error_message
  }
  # Only try to read the log file if the simulation succeeded.
  if sim_success:
    log_file = h5py.File(log_file_name, 'r')
    params = log_file['parameters']
    messages = log_file['messages']
    sim = messages['kAioNodeSimulator']['kMessageTypeSimTelemetry'][
        'message']
    control = (messages['kAioNodeControllerA']['kMessageTypeControlDebug']
               ['message'])
    wing_model = int(params['system_params']['wing_model'][0])
    # Evaluate every scoring function on the sim/control time series.
    for scoring_function in self._params.scoring_functions:
      assert scoring_function.GetName() not in full_output
      full_output[scoring_function.GetName()] = (
          scoring_function.GetOutput(
              scoring_function.GetTimeSeries(params, sim, control)))
    log_file.close()
    if self._scoring_events:
      checklist = crosswind_sweep_checks.CrosswindSweepChecks(
          True, wing_model)
      checklist.SetMinGap(10)
      # TODO: Clean up the description which can be different from
      # the original scoring functions (i.e. % of saturation --> saturation).
      check_results = autocheck.RunFromLocal(
          os.path.dirname(log_file_name), os.path.basename(log_file_name),
          checklist, None, False)
      full_output['events'] = struct_tree.StructTree(
          autocheck.GatherResultsFromMultipleFiles(check_results),
          True).Data()
  return json.dumps(full_output, indent=2, separators=(',', ': '),
                    cls=json_util.JsonNumpyEncoder)