async def created_event(self, payload):
    """
    The VEN informs us that they created an EiEvent.

    In 'internal' polling mode the response is matched against our own event
    administration and any registered per-event callbacks are resolved.
    Otherwise each response is handed to the user's ``on_created_event``
    handler.

    :param dict payload: Parsed oadrCreatedEvent message. Must contain
                         'ven_id' and a list of 'event_responses'.
    :return: The ('oadrResponse', {}) tuple to send back to the VEN.
    :raises errors.InvalidIdError: if the VEN responds to an event we do not
                                   know about (in 'internal' polling mode).
    """
    ven_id = payload['ven_id']
    if self.polling_method == 'internal':
        for event_response in payload['event_responses']:
            event_id = event_response['event_id']
            modification_number = event_response['modification_number']
            opt_type = event_response['opt_type']
            # Look up the exact event (id AND modification number) the VEN responded to.
            event = utils.find_by(self.events[ven_id],
                                  'event_descriptor.event_id', event_id,
                                  'event_descriptor.modification_number', modification_number)
            if not event:
                # Only warn when the event is truly unknown; a response to an
                # already-completed event is expected and not worth a warning.
                if event_id not in self.completed_event_ids.get(ven_id, []):
                    logger.warning(f"""Got an oadrCreatedEvent message from ven '{ven_id}' """
                                   f"""for event '{event_id}' with modification number """
                                   f"""{modification_number} that does not exist.""")
                raise errors.InvalidIdError
            # Remove the event from the events list if the cancellation is confirmed.
            if utils.getmember(event, 'event_descriptor.event_status') == enums.EVENT_STATUS.CANCELLED:
                utils.pop_by(self.events[ven_id], 'event_descriptor.event_id', event_id)
            if event_response['event_id'] in self.event_callbacks:
                event, callback = self.event_callbacks.pop(event_id)
                if isinstance(callback, asyncio.Future):
                    # A Future can only be resolved once; warn on a second response.
                    if callback.done():
                        logger.warning(f"Got a second response '{opt_type}' from ven '{ven_id}' "
                                       f"to event '{event_id}', which we cannot use because the "
                                       "callback future you provided was already completed during "
                                       "the first response.")
                    else:
                        callback.set_result(opt_type)
                else:
                    # Plain callable; may be a coroutine function, so await if needed.
                    result = callback(ven_id=ven_id, event_id=event_id, opt_type=opt_type)
                    if asyncio.iscoroutine(result):
                        result = await result
    else:
        # External polling: forward each response to the user-supplied handler.
        for event_response in payload['event_responses']:
            event_id = event_response['event_id']
            opt_type = event_response['opt_type']
            result = await utils.await_if_required(
                self.on_created_event(ven_id=ven_id, event_id=event_id, opt_type=opt_type))
    return 'oadrResponse', {}
def test_find_by_nested_dict():
    """find_by should follow dotted paths into nested dicts and return the match."""
    haystack = [
        {'dict1': {'a': 123, 'b': 456}},
        {'dict1': {'a': 321, 'b': 654, 'c': 1000}},
    ]
    match = utils.find_by(haystack, 'dict1.c', 1000)
    assert match == haystack[1]
def cancel_event(self, ven_id, event_id):
    """
    Mark the indicated event as cancelled.

    Sets the event's status to CANCELLED, bumps its modification number and
    flags the VEN's event list as updated so the change is pushed/polled.

    :param str ven_id: The id of the VEN that owns the event.
    :param str event_id: The id of the event to cancel.
    """
    event = utils.find_by(self.events[ven_id], 'event_descriptor.event_id', event_id)
    if not event:
        # Bug fix: the original message used plain (non-f) strings, so the
        # placeholders and the list comprehension were logged literally.
        known_ids = [utils.getmember(e, 'event_descriptor.event_id')
                     for e in self.events[ven_id]]
        logger.error(f"The event you tried to cancel was not found. "
                     f"Was looking for event_id {event_id} for ven {ven_id}. "
                     f"Only found these: {known_ids}")
        return

    # Set the Event Status to cancelled
    utils.setmember(event, 'event_descriptor.event_status', enums.EVENT_STATUS.CANCELLED)
    utils.increment_event_modification_number(event)
    self.events_updated[ven_id] = True
async def register_report(self, payload):
    """
    Handle the VENs reporting capabilities.

    Depending on the signature of the user's ``on_register_report`` handler,
    it is called either once per report description ('compact' style, with
    the individual fields as keyword arguments) or once per report ('full'
    style, with the whole report dict).

    :param dict payload: The parsed oadrRegisterReport message.
    :return: The ('oadrRegisteredReport', {...}) response tuple containing
             the ReportRequests we want the VEN to deliver.
    """
    report_requests = []
    # Detect the 'compact' handler signature by its keyword parameters.
    args = inspect.signature(self.on_register_report).parameters
    if all(['measurement' in args, 'resource_id' in args, 'min_sampling_interval' in args,
            'max_sampling_interval' in args, 'unit' in args, 'scale' in args]):
        for report in payload['reports']:
            result = [self.on_register_report(resource_id=rd['report_subject']['resource_id'],
                                              measurement=rd['measurement']['item_description'],
                                              unit=rd['measurement']['item_units'],
                                              scale=rd['measurement']['si_scale_code'],
                                              min_sampling_interval=rd['sampling_rate']['min_period'],
                                              max_sampling_interval=rd['sampling_rate']['max_period'])
                      for rd in report['report_descriptions']]
            # If the handler is a coroutine function, await all calls at once.
            if iscoroutine(result[0]):
                result = await gather(*result)
            # Prepend each r_id so the tuples become (r_id, callback, interval, ...).
            result = [(report['report_descriptions'][i]['r_id'], *result[i])
                      for i in range(len(report['report_descriptions']))]
            report_requests.append(result)
    else:
        # Use the 'full' mode for openADR reporting
        result = [self.on_register_report(report) for report in payload['reports']]
        if iscoroutine(result[0]):
            result = await gather(*result)
        # Now we have r_id, callback, sampling_rate
        report_requests = result
    # Rudimentary shape check: each tuple must have 3 or 4 elements.
    for i, report_request in enumerate(report_requests):
        if report_request is not None:
            if not all(len(rrq) in (3, 4) for rrq in report_request):
                logger.error("Your on_register_report handler did not return a valid response")
    # Validate the report requests
    for i, report_request in enumerate(report_requests):
        if report_request is None:
            continue
        # Check if all sampling rates per report_request are the same
        sampling_interval = min(rrq[2] for rrq in report_request if rrq is not None)
        if not all(rrq is not None and report_request[0][2] == sampling_interval
                   for rrq in report_request):
            logger.error("OpenADR does not support multiple different sampling rates per "
                         "report. OpenLEADR will set all sampling rates to "
                         f"{sampling_interval}")
    # Form the report request
    oadr_report_requests = []
    for i, report_request in enumerate(report_requests):
        if report_request is None:
            continue
        orig_report = payload['reports'][i]
        report_specifier_id = orig_report['report_specifier_id']
        report_request_id = generate_id()
        specifier_payloads = []
        for rrq in report_request:
            # A 3-tuple reports at the sampling interval; a 4-tuple supplies
            # a separate report-back interval.
            if len(rrq) == 3:
                r_id, callback, sampling_interval = rrq
                report_interval = sampling_interval
            elif len(rrq) == 4:
                r_id, callback, sampling_interval, report_interval = rrq
            report_description = find_by(orig_report['report_descriptions'], 'r_id', r_id)
            reading_type = report_description['reading_type']
            specifier_payloads.append(objects.SpecifierPayload(r_id=r_id,
                                                               reading_type=reading_type))
            # Append the callback to our list of known callbacks
            self.report_callbacks[(report_request_id, r_id)] = callback
        # Add the ReportSpecifier to the ReportRequest
        report_specifier = objects.ReportSpecifier(report_specifier_id=report_specifier_id,
                                                   granularity=sampling_interval,
                                                   report_back_duration=report_interval,
                                                   specifier_payloads=specifier_payloads)
        # Add the ReportRequest to our outgoing message
        oadr_report_requests.append(objects.ReportRequest(report_request_id=report_request_id,
                                                          report_specifier=report_specifier))
    # Put the report requests back together
    response_type = 'oadrRegisteredReport'
    response_payload = {'report_requests': oadr_report_requests}
    return response_type, response_payload
async def update_report(self, report_request_id):
    """
    Call the previously registered report callback and send the result as a
    message to the VTN.

    In 'full' data collection mode the callback is asked for a whole period
    of values at once; in 'incremental' mode it is asked for the current
    value(s) and the report is compiled over multiple calls until complete.

    :param str report_request_id: The id of the report request (assigned by
                                  the VTN) to collect values for.
    """
    logger.debug(f"Running update_report for {report_request_id}")
    report_request = find_by(self.report_requests, 'report_request_id', report_request_id)
    granularity = report_request['granularity']
    report_back_duration = report_request['report_back_duration']
    report_specifier_id = report_request['report_specifier_id']
    report = find_by(self.reports, 'report_specifier_id', report_specifier_id)
    data_collection_mode = report.data_collection_mode
    if report_request_id in self.incomplete_reports:
        # Continue filling a report that is still being compiled.
        logger.debug("We were already compiling this report")
        outgoing_report = self.incomplete_reports[report_request_id]
    else:
        logger.debug("There is no report in progress")
        outgoing_report = objects.Report(report_request_id=report_request_id,
                                         report_specifier_id=report.report_specifier_id,
                                         report_name=report.report_name,
                                         intervals=[])

    intervals = outgoing_report.intervals or []
    if data_collection_mode == 'full':
        # 'full' mode: ask the callback for the whole period in one call.
        if report_back_duration is None:
            report_back_duration = granularity
        date_to = datetime.now(timezone.utc)
        date_from = date_to - max(report_back_duration, granularity)
        for r_id in report_request['r_ids']:
            report_callback = self.report_callbacks[(report_specifier_id, r_id)]
            result = report_callback(date_from=date_from,
                                     date_to=date_to,
                                     sampling_interval=granularity)
            if asyncio.iscoroutine(result):
                result = await result
            for dt, value in result:
                report_payload = objects.ReportPayload(r_id=r_id, value=value)
                intervals.append(objects.ReportInterval(dtstart=dt,
                                                        report_payload=report_payload))
    else:
        # 'incremental' mode: ask the callback for the current value(s).
        for r_id in report_request['r_ids']:
            report_callback = self.report_callbacks[(report_specifier_id, r_id)]
            result = report_callback()
            if asyncio.iscoroutine(result):
                result = await result
            if isinstance(result, (int, float)):
                # A bare number is timestamped with 'now'.
                result = [(datetime.now(timezone.utc), result)]
            for dt, value in result:
                logger.info(f"Adding {dt}, {value} to report")
                report_payload = objects.ReportPayload(r_id=r_id, value=value)
                intervals.append(objects.ReportInterval(dtstart=dt,
                                                        report_payload=report_payload))
    outgoing_report.intervals = intervals
    logger.info(f"The number of intervals in the report is now {len(outgoing_report.intervals)}")

    # Figure out if the report is complete after this sampling
    if data_collection_mode == 'incremental' and report_back_duration is not None\
            and report_back_duration > granularity:
        report_interval = report_back_duration.total_seconds()
        sampling_interval = granularity.total_seconds()
        # One interval per r_id per sampling slot within the report-back period.
        expected_len = len(report_request['r_ids']) * int(report_interval / sampling_interval)
        if len(outgoing_report.intervals) == expected_len:
            logger.info("The report is now complete with all the values. Will queue for sending.")
            await self.pending_reports.put(self.incomplete_reports.pop(report_request_id))
        else:
            logger.debug("The report is not yet complete, will hold until it is.")
            self.incomplete_reports[report_request_id] = outgoing_report
    else:
        logger.info("Report will be sent now.")
        await self.pending_reports.put(outgoing_report)
async def create_report(self, report_request):
    """
    Add the requested reports to the reporting mechanism.
    This is called when the VTN requests reports from us.

    Validates the requested r_ids, measurements and granularity against our
    registered reports, then schedules a recurring job that compiles and
    queues the report.

    :param report_request dict: The oadrReportRequest dict from the VTN.
    :return: False if the requested report does not exist, otherwise None.
    """
    # Get the relevant variables from the report requests
    report_request_id = report_request['report_request_id']
    report_specifier_id = report_request['report_specifier']['report_specifier_id']
    report_back_duration = report_request['report_specifier'].get('report_back_duration')
    granularity = report_request['report_specifier']['granularity']

    # Check if this report actually exists
    report = find_by(self.reports, 'report_specifier_id', report_specifier_id)
    if not report:
        logger.error(f"A non-existant report with report_specifier_id "
                     f"{report_specifier_id} was requested.")
        return False

    # Check and collect the requested r_ids for this report
    requested_r_ids = []
    for specifier_payload in report_request['report_specifier']['specifier_payloads']:
        r_id = specifier_payload['r_id']
        # Check if the requested r_id actually exists
        rd = find_by(report.report_descriptions, 'r_id', r_id)
        if not rd:
            logger.error(f"A non-existant report with r_id {r_id} "
                         f"inside report with report_specifier_id {report_specifier_id} "
                         f"was requested.")
            continue

        # Check if the requested measurement exists and if the correct unit is requested
        if 'measurement' in specifier_payload:
            measurement = specifier_payload['measurement']
            if measurement['item_description'] != rd.measurement.item_description:
                logger.error(f"A non-matching measurement description for report with "
                             f"report_request_id {report_request_id} and r_id {r_id} was given "
                             f"by the VTN. Offered: {rd.measurement.item_description}, "
                             f"requested: {measurement['item_description']}")
                continue
            if measurement['item_units'] != rd.measurement.item_units:
                logger.error(f"A non-matching measurement unit for report with "
                             f"report_request_id {report_request_id} and r_id {r_id} was given "
                             f"by the VTN. Offered: {rd.measurement.item_units}, "
                             f"requested: {measurement['item_units']}")
                continue

        # The requested granularity must fall within our offered sampling range.
        if granularity is not None:
            if not rd.sampling_rate.min_period <= granularity <= rd.sampling_rate.max_period:
                logger.error(f"An invalid sampling rate {granularity} was requested for report "
                             f"with report_specifier_id {report_specifier_id} and r_id {r_id}. "
                             f"The offered sampling rate was between "
                             f"{rd.sampling_rate.min_period} and "
                             f"{rd.sampling_rate.max_period}")
                continue
        else:
            # If no granularity is specified, set it to the lowest sampling rate.
            granularity = rd.sampling_rate.max_period

        requested_r_ids.append(r_id)

    # Schedule a recurring job that compiles and queues this report.
    callback = partial(self.update_report, report_request_id=report_request_id)

    reporting_interval = report_back_duration or granularity
    job = self.scheduler.add_job(func=callback,
                                 trigger='cron',
                                 **cron_config(reporting_interval))

    self.report_requests.append({'report_request_id': report_request_id,
                                 'report_specifier_id': report_specifier_id,
                                 'report_back_duration': report_back_duration,
                                 'r_ids': requested_r_ids,
                                 'granularity': granularity,
                                 'job': job})
def add_report(self, callback, resource_id, measurement,
               data_collection_mode='incremental',
               report_specifier_id=None, r_id=None,
               report_name=enums.REPORT_NAME.TELEMETRY_USAGE,
               reading_type=enums.READING_TYPE.DIRECT_READ,
               report_type=enums.REPORT_TYPE.READING,
               sampling_rate=None, data_source=None,
               scale="none", unit=None,
               power_ac=True, power_hertz=50, power_voltage=230,
               market_context=None):
    """
    Add a new reporting capability to the client.

    :param callable callback: A callback or coroutine that will fetch the value for a specific
                              report. This callback will be passed the report_id and the r_id
                              of the requested value.
    :param str resource_id: A specific name for this resource within this report.
    :param str measurement: The quantity that is being measured (openleadr.enums.MEASUREMENTS).
    :param str data_collection_mode: Whether you want the data to be collected incrementally
                                     or at once. If the VTN requests the sampling interval to be
                                     higher than the reporting interval, this setting determines
                                     if the callback should be called at the sampling rate (with
                                     no args, assuming it returns the current value), or at the
                                     reporting interval (with date_from and date_to as keyword
                                     arguments). Choose 'incremental' for the former case, or
                                     'full' for the latter case.
    :param str report_specifier_id: A unique identifier for this report. Leave this blank for a
                                    random generated id, or fill it in if your VTN depends on
                                    this being a known value, or if it needs to be constant
                                    between restarts of the client.
    :param str r_id: A unique identifier for a datapoint in a report. The same remarks apply as
                     for the report_specifier_id.
    :param str report_name: An OpenADR name for this report (one of openleadr.enums.REPORT_NAME).
    :param str reading_type: An OpenADR reading type (found in openleadr.enums.READING_TYPE).
    :param str report_type: An OpenADR report type (found in openleadr.enums.REPORT_TYPE).
    :param datetime.timedelta sampling_rate: The sampling rate for the measurement.
    :param str scale: The SI scale code for the measurement (openleadr.enums.SI_SCALE_CODE).
    :param str unit: The unit for this measurement.
    :param str market_context: The market context for this report description
                               (defaults to 'Market01').
    :raises ValueError: on an invalid report_name, reading_type, report_type, scale or
                        data_collection_mode.
    :raises TypeError: when a 'full' mode callback does not accept the required arguments.
    """
    # Verify input
    if report_name not in enums.REPORT_NAME.values and not report_name.startswith('x-'):
        raise ValueError(f"{report_name} is not a valid report_name. Valid options are "
                         f"{', '.join(enums.REPORT_NAME.values)}",
                         " or any name starting with 'x-'.")
    if reading_type not in enums.READING_TYPE.values and not reading_type.startswith('x-'):
        raise ValueError(f"{reading_type} is not a valid reading_type. Valid options are "
                         f"{', '.join(enums.READING_TYPE.values)}"
                         " or any name starting with 'x-'.")
    if report_type not in enums.REPORT_TYPE.values and not report_type.startswith('x-'):
        raise ValueError(f"{report_type} is not a valid report_type. Valid options are "
                         f"{', '.join(enums.REPORT_TYPE.values)}"
                         " or any name starting with 'x-'.")
    if scale not in enums.SI_SCALE_CODE.values:
        raise ValueError(f"{scale} is not a valid scale. Valid options are "
                         f"{', '.join(enums.SI_SCALE_CODE.values)}")

    if sampling_rate is None:
        # Default: allow the VTN to request anything between 10 seconds and 24 hours.
        sampling_rate = objects.SamplingRate(min_period=timedelta(seconds=10),
                                             max_period=timedelta(hours=24),
                                             on_change=False)
    elif isinstance(sampling_rate, timedelta):
        # A plain timedelta means a fixed sampling rate.
        sampling_rate = objects.SamplingRate(min_period=sampling_rate,
                                             max_period=sampling_rate,
                                             on_change=False)

    if data_collection_mode not in ('incremental', 'full'):
        raise ValueError("The data_collection_mode should be 'incremental' or 'full'.")

    if data_collection_mode == 'full':
        # 'full' mode callbacks are called with a period; verify the signature up front.
        args = inspect.signature(callback).parameters
        if not ('date_from' in args and 'date_to' in args and 'sampling_interval' in args):
            raise TypeError("Your callback function must accept the 'date_from', 'date_to' "
                            "and 'sampling_interval' arguments if used "
                            "with data_collection_mode 'full'.")

    # Determine the correct item name, item description and unit
    if isinstance(measurement, objects.Measurement):
        item_base = measurement
    elif measurement.upper() in enums.MEASUREMENTS.members:
        item_base = enums.MEASUREMENTS[measurement.upper()]
    else:
        item_base = objects.Measurement(item_name='customUnit',
                                        item_description=measurement,
                                        item_units=unit,
                                        si_scale_code=scale)

    if scale is not None:
        if scale in enums.SI_SCALE_CODE.values:
            item_base.si_scale_code = scale
        else:
            # Bug fix: this message was a broken non-f format string and raised
            # a garbled error text.
            raise ValueError(f"The 'scale' argument must be one of "
                             f"{', '.join(enums.SI_SCALE_CODE.values)}")

    # Check if unit is compatible
    if unit is not None and unit != item_base.item_units \
            and unit not in item_base.acceptable_units:
        logger.warning(f"The supplied unit {unit} for measurement {measurement} "
                       f"will be ignored, {item_base.item_units} will be used instead. "
                       f"Allowed units for this measurement are: "
                       f"{', '.join(item_base.acceptable_units)}")

    # Get or create the relevant Report
    if report_specifier_id:
        report = find_by(self.reports,
                         'report_name', report_name,
                         'report_specifier_id', report_specifier_id)
    else:
        report = find_by(self.reports, 'report_name', report_name)

    if not report:
        report_specifier_id = report_specifier_id or generate_id()
        report = objects.Report(created_date_time=datetime.now(),
                                report_name=report_name,
                                report_specifier_id=report_specifier_id,
                                data_collection_mode=data_collection_mode)
        self.reports.append(report)

    # Add the new report description to the report
    target = objects.Target(resource_id=resource_id)
    # Bug fix: honor a caller-supplied r_id, as promised by the docstring,
    # instead of always generating a new one.
    r_id = r_id or generate_id()
    report_description = objects.ReportDescription(r_id=r_id,
                                                   reading_type=reading_type,
                                                   report_data_source=target,
                                                   report_subject=target,
                                                   report_type=report_type,
                                                   sampling_rate=sampling_rate,
                                                   measurement=item_base,
                                                   # Bug fix: use the supplied market_context
                                                   # instead of a hard-coded value.
                                                   market_context=market_context or 'Market01')
    self.report_callbacks[(report.report_specifier_id, r_id)] = callback
    report.report_descriptions.append(report_description)
async def _on_event(self, message):
    """
    Internal handler for an incoming oadrDistributeEvent message.

    Dispatches each event to the user's on_event / on_update_event handler,
    validates the returned opt types, and sends an oadrCreatedEvent response
    back to the VTN for every event that requires one.

    :param dict message: The parsed oadrDistributeEvent payload, containing
                         'request_id' and a list of 'events'.
    """
    logger.debug("The VEN received an event")
    events = message['events']
    try:
        results = []
        for event in message['events']:
            event_id = event['event_descriptor']['event_id']
            event_status = event['event_descriptor']['event_status']
            modification_number = event['event_descriptor']['modification_number']
            received_event = utils.find_by(self.received_events,
                                           'event_descriptor.event_id', event_id)
            if received_event:
                if received_event['event_descriptor']['modification_number'] == modification_number:
                    # Re-submit the same opt type as we already had previously
                    result = self.responded_events[event_id]
                else:
                    # Replace the event with the fresh copy
                    utils.pop_by(self.received_events, 'event_descriptor.event_id', event_id)
                    self.received_events.append(event)
                    # Wait for the result of the on_update_event handler
                    result = await utils.await_if_required(self.on_update_event(event))
            else:
                # Wait for the result of the on_event
                self.received_events.append(event)
                result = self.on_event(event)
                if asyncio.iscoroutine(result):
                    result = await result
            results.append(result)
            if event_status in (enums.EVENT_STATUS.COMPLETED, enums.EVENT_STATUS.CANCELLED):
                # Bug fix: pop with a default so a first-seen completed event
                # (never stored in responded_events) does not raise KeyError.
                self.responded_events.pop(event_id, None)
            else:
                self.responded_events[event_id] = result
        for i, result in enumerate(results):
            if result not in ('optIn', 'optOut') and events[i]['response_required'] == 'always':
                logger.error("Your on_event or on_update_event handler must return "
                             "'optIn' or 'optOut'; "
                             f"you supplied {result}. Please fix your on_event handler.")
                results[i] = 'optOut'
    except Exception as err:
        logger.error("Your on_event handler encountered an error. Will Opt Out of the event. "
                     f"The error was {err.__class__.__name__}: {str(err)}")
        results = ['optOut'] * len(events)

    # Bug fix: take the modification number from each event itself. The original
    # used the stale loop variable, which held the LAST event's number (and was
    # unbound if the handler raised before the first assignment).
    event_responses = [
        {'response_code': 200,
         'response_description': 'OK',
         'opt_type': results[i],
         'request_id': message['request_id'],
         'modification_number': event['event_descriptor']['modification_number'],
         'event_id': event['event_descriptor']['event_id']}
        for i, event in enumerate(events)
        if event['response_required'] == 'always'
        and not utils.determine_event_status(event['active_period']) == 'completed'
    ]

    if len(event_responses) > 0:
        response = {'response_code': 200,
                    'response_description': 'OK',
                    'request_id': message['request_id']}
        message = self._create_message('oadrCreatedEvent',
                                       response=response,
                                       event_responses=event_responses,
                                       ven_id=self.ven_id)
        service = 'EiEvent'
        response_type, response_payload = await self._perform_request(service, message)
        # Bug fix: pass both values through the format string; the original
        # logger.info(a, b) silently discarded the payload (it was treated as
        # a %-format argument for a message with no placeholder).
        logger.info("%s: %s", response_type, response_payload)
    else:
        logger.info("Not sending any event responses, because a response was "
                    "not required/allowed by the VTN.")
def test_find_by_with_missing_member():
    """find_by should skip items that lack the searched key and still find a match."""
    haystack = [
        {'a': 123, 'b': 456},
        {'a': 321, 'b': 654, 'c': 1000},
    ]
    match = utils.find_by(haystack, 'c', 1000)
    assert match == haystack[1]
def test_find_by_with_dict():
    """find_by should search the values of a dict and return the matching value."""
    haystack = {
        'one': {'a': 123, 'b': 456},
        'two': {'a': 321, 'b': 654},
    }
    match = utils.find_by(haystack, 'a', 123)
    assert match == haystack['one']
async def register_report(self, payload):
    """
    Handle the VENs reporting capabilities.

    Depending on the signature of the user's ``on_register_report`` handler,
    it is called either once per report description ('compact' mode, with the
    individual fields as keyword arguments) or once per report ('full' mode,
    with the whole report dict). History reports are stored for later
    on-demand requests instead of being subscribed to.

    :param dict payload: The parsed oadrRegisterReport message; must contain
                         'ven_id' and a list of 'reports' (or None).
    :return: The ('oadrRegisteredReport', {...}) response tuple, or None when
             the payload contains no reports.
    """
    report_requests = []
    # Detect the 'compact' handler signature by its keyword parameters.
    args = inspect.signature(self.on_register_report).parameters
    if all(['ven_id' in args, 'resource_id' in args, 'measurement' in args,
            'min_sampling_interval' in args, 'max_sampling_interval' in args,
            'unit' in args, 'scale' in args]):
        mode = 'compact'
    else:
        mode = 'full'
    if payload['reports'] is None:
        return
    for report in payload['reports']:
        if report['report_name'] == 'METADATA_TELEMETRY_STATUS':
            if mode == 'compact':
                # Status reports have no real measurement; pass 'Status' instead.
                results = [self.on_register_report(ven_id=payload['ven_id'],
                                                   resource_id=rd.get('report_data_source',
                                                                      {}).get('resource_id'),
                                                   measurement='Status',
                                                   unit=None,
                                                   scale=None,
                                                   min_sampling_interval=rd['sampling_rate']['min_period'],
                                                   max_sampling_interval=rd['sampling_rate']['max_period'])
                           for rd in report['report_descriptions']]
                results = await utils.gather_if_required(results)
            elif mode == 'full':
                results = await utils.await_if_required(self.on_register_report(report))
        elif report['report_name'] == 'METADATA_TELEMETRY_USAGE':
            if mode == 'compact':
                results = [self.on_register_report(ven_id=payload['ven_id'],
                                                   resource_id=rd.get('report_data_source',
                                                                      {}).get('resource_id'),
                                                   measurement=rd['measurement']['description'],
                                                   unit=rd['measurement']['unit'],
                                                   scale=rd['measurement']['scale'],
                                                   min_sampling_interval=rd['sampling_rate']['min_period'],
                                                   max_sampling_interval=rd['sampling_rate']['max_period'])
                           for rd in report['report_descriptions']]
                results = await utils.gather_if_required(results)
            elif mode == 'full':
                results = await utils.await_if_required(self.on_register_report(report))
        elif report['report_name'] in ('METADATA_HISTORY_USAGE',
                                       'METADATA_HISTORY_GREENBUTTON'):
            # History reports are stored for on-demand retrieval; no subscription
            # is created, so no report request is formed for them.
            if payload['ven_id'] not in self.registered_reports:
                self.registered_reports[payload['ven_id']] = []
            # Strip the 'METADATA_' prefix before storing the report.
            report['report_name'] = report['report_name'][9:]
            self.registered_reports[payload['ven_id']].append(report)
            report_requests.append(None)
            continue
        else:
            logger.warning("Reports other than TELEMETRY_USAGE, TELEMETRY_STATUS, "
                           "HISTORY_USAGE and HISTORY_GREENBUTTON are not yet supported. "
                           f"Skipping report with name {report['report_name']}.")
            report_requests.append(None)
            continue

        # Perform some rudimentary checks on the returned type
        if results is not None:
            if not isinstance(results, list):
                logger.error("Your on_register_report handler must return a list of tuples "
                             "or None; "
                             f"it returned '{results}' ({results.__class__.__name__}).")
                results = None
            else:
                for i, r in enumerate(results):
                    if r is None:
                        continue
                    if not isinstance(r, tuple):
                        if mode == 'compact':
                            logger.error("Your on_register_report handler must return a tuple "
                                         "or None; "
                                         f"it returned '{r}' ({r.__class__.__name__}).")
                        elif mode == 'full':
                            logger.error("Your on_register_report handler must return a list "
                                         "of tuples or None; "
                                         f"The first item from the list was '{r}' "
                                         f"({r.__class__.__name__}).")
                        results[i] = None
                # If we used compact mode, prepend the r_id to each result
                # (this is already there when using the full mode)
                if mode == 'compact':
                    results = [(report['report_descriptions'][i]['r_id'], *results[i])
                               for i in range(len(report['report_descriptions']))
                               if isinstance(results[i], tuple)]
        report_requests.append(results)
    utils.validate_report_request_tuples(report_requests, mode=mode)

    for i, report_request in enumerate(report_requests):
        if report_request is None or len(report_request) == 0 \
                or all(rrq is None for rrq in report_request):
            continue
        # Check if all sampling rates per report_request are the same
        sampling_interval = min(rrq[2] for rrq in report_request if isinstance(rrq, tuple))
        if not all(rrq is not None and report_request[0][2] == sampling_interval
                   for rrq in report_request):
            logger.error("OpenADR does not support multiple different sampling rates per "
                         "report. OpenLEADR will set all sampling rates to "
                         f"{sampling_interval}")

    # Form the report request
    oadr_report_requests = []
    for i, report_request in enumerate(report_requests):
        if report_request is None or len(report_request) == 0 \
                or all(rrq is None for rrq in report_request):
            continue

        orig_report = payload['reports'][i]
        report_specifier_id = orig_report['report_specifier_id']
        report_request_id = utils.generate_id()
        specifier_payloads = []
        for rrq in report_request:
            # A 3-tuple reports at the sampling interval; a 4-tuple supplies a
            # separate report-back interval.
            if len(rrq) == 3:
                r_id, callback, sampling_interval = rrq
                report_interval = sampling_interval
            elif len(rrq) == 4:
                r_id, callback, sampling_interval, report_interval = rrq

            report_description = utils.find_by(orig_report['report_descriptions'],
                                               'r_id', r_id)
            reading_type = report_description['reading_type']
            specifier_payloads.append(objects.SpecifierPayload(r_id=r_id,
                                                               reading_type=reading_type))
            # Append the callback to our list of known callbacks
            self.report_callbacks[(report_request_id, r_id)] = callback

        # Add the ReportSpecifier to the ReportRequest
        report_specifier = objects.ReportSpecifier(report_specifier_id=report_specifier_id,
                                                   granularity=sampling_interval,
                                                   report_back_duration=report_interval,
                                                   specifier_payloads=specifier_payloads)

        # Add the ReportRequest to our outgoing message
        oadr_report_requests.append(objects.ReportRequest(report_request_id=report_request_id,
                                                          report_specifier=report_specifier))

    # Put the report requests back together
    response_type = 'oadrRegisteredReport'
    response_payload = {'report_requests': oadr_report_requests}

    # Store the requested reports
    self.requested_reports[payload['ven_id']] = oadr_report_requests
    return response_type, response_payload