def publish_record(self, topic_suffix, message):
     headers = {
         headers_mod.DATE: utils.format_timestamp(utils.get_aware_utc_now())
     }
     message["TimeStamp"] = utils.format_timestamp(self.current_datetime)
     topic = "/".join([self.record_topic, topic_suffix])
     self.vip.pubsub.publish("pubsub", topic, headers, message).get()
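utils.format_timestamp and utils.get_aware_utc_now come from volttron.platform.agent.utils; a minimal standard-library stand-in is sketched below, useful for seeing what the DATE header carries (the exact VOLTTRON output format may differ slightly, e.g. in microsecond padding, so treat this as an approximation):

from datetime import datetime, timezone

def get_aware_utc_now():
    # Timezone-aware UTC "now", mirroring utils.get_aware_utc_now().
    return datetime.now(timezone.utc)

def format_timestamp(dt):
    # ISO 8601 string such as '2024-01-01T12:00:00.123456+00:00';
    # a stand-in approximation of utils.format_timestamp().
    return dt.isoformat()

headers = {"Date": format_timestamp(get_aware_utc_now())}
print(headers)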
    def schedule_run(self,
                     cur_exp_time,
                     cur_analysis_time,
                     start_of_cycle=False):
        # 191218DJH: The logic in this section should be REPLACED by the new market state machine. See method go().
        """
        Runs at first start and then at the beginning of each hour.
        :return:
        """
        # Balance
        market = self.markets[0]  # Assume only 1 TNS market per node
        # market.balance(self)
        # market.events(self)
        # self.campus.prep_transactive_signal(market, self)
        # self.campus.send_transactive_signal(self, self.city_supply_topic,
        #                                    start_of_cycle=start_of_cycle)

        # Schedule to run next hour with start_of_cycle = True
        cur_exp_time = parser.parse(cur_exp_time)
        cur_analysis_time = parser.parse(cur_analysis_time)
        next_exp_time, next_analysis_time = self.get_next_exp_time(
            cur_exp_time, cur_analysis_time)
        self.core.schedule(next_exp_time,
                           self.schedule_run,
                           format_timestamp(next_exp_time),
                           format_timestamp(next_analysis_time),
                           start_of_cycle=True)
    def schedule_run(self,
                     cur_exp_time,
                     cur_analysis_time,
                     start_of_cycle=False):
        """
        Runs at first start and then at the beginning of each hour.
        :return:
        """
        # Balance
        market = self.markets[0]  # Assume only 1 TNS market per node
        market.balance(self)
        prices = market.marginalPrices
        prices = prices[-25:]
        prices = [x.value for x in prices]
        _time = format_timestamp(Timer.get_cur_time())
        self.vip.pubsub.publish(peer='pubsub',
                                topic=self.price_topic,
                                message={
                                    'prices': prices,
                                    'current_time': _time
                                })
        self.campus.model.prep_transactive_signal(market, self)
        self.campus.model.send_transactive_signal(
            self, self.city_supply_topic, start_of_cycle=start_of_cycle)

        # Schedule to run next hour with start_of_cycle = True
        cur_exp_time = parser.parse(cur_exp_time)
        cur_analysis_time = parser.parse(cur_analysis_time)
        next_exp_time, next_analysis_time = self.get_next_exp_time(
            cur_exp_time, cur_analysis_time)
        self.core.schedule(next_exp_time,
                           self.schedule_run,
                           format_timestamp(next_exp_time),
                           format_timestamp(next_analysis_time),
                           start_of_cycle=True)
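get_next_exp_time() is not shown on this page; given that the caller re-schedules itself to run next hour, a plausible sketch (an assumption for illustration, not the project's actual helper) is:

from datetime import timedelta

def get_next_exp_time(cur_exp_time, cur_analysis_time):
    # Hypothetical helper: advance both datetimes to the next top of the hour.
    def next_hour(dt):
        return (dt + timedelta(hours=1)).replace(minute=0, second=0, microsecond=0)
    return next_hour(cur_exp_time), next_hour(cur_analysis_time)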
Example #4
 def schedule_for_actuator(self, groups):
     # create start and end timestamps
     _now = get_aware_utc_now()
     str_start = format_timestamp(_now)
     _end = _now + td(seconds=10)
     str_end = format_timestamp(_end)
     schedule_request = []
     # wrap the topic and timestamps up in a list and add it to the schedules list
     _log.debug(
         f'[Continuous Roller Agent INFO] - ACTUATOR DEBUG GROUP IS {groups}'
     )
     for group in groups:
         for key, value in self.nested_group_map[group].items():
             if key not in ('score', 'shed_count'):
                 topic_sched_group_l1n = '/'.join(
                     [self.building_topic, str(value)])
                 schedule_request.append(
                     [topic_sched_group_l1n, str_start, str_end])
     # send the request to the actuator
     result = self.vip.rpc.call('platform.actuator', 'request_new_schedule',
                                self.core.identity, 'my_schedule', 'HIGH',
                                schedule_request).get(timeout=90)
     _log.debug(
         f'[Continuous Roller Agent INFO] - ACTUATOR SCHEDULE EVENT SUCCESS {result}'
     )
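The request_new_schedule payload is a list of [topic, start, end] triples, and (judging by the other examples on this page) the RPC returns a dictionary with 'result' and 'info' keys; a hedged sketch of interpreting it:

def schedule_succeeded(result):
    # Result shape inferred from the examples on this page:
    # {'result': 'SUCCESS' | 'FAILURE', 'info': '...'}
    if result.get("result") == "FAILURE":
        # A task id that already exists is usually benign.
        return result.get("info") == "TASK_ID_ALREADY_EXISTS"
    return True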
 def publish_data(self, topic, value, time_stamp):
     headers = {headers_mod.DATE: format_timestamp(get_aware_utc_now())}
     message = {"Value": value}
     message["TimeStamp"] = format_timestamp(time_stamp)
     topic = "/".join([self.logging_topic, topic])
     _log.debug("LOGGING {} - {} - {}".format(topic, value, time_stamp))
     self.parent.vip.pubsub.publish("pubsub", topic, headers, message).get()
Example #6
    def schedule_run(self,
                     cur_exp_time,
                     cur_analysis_time,
                     start_of_cycle=False):
        """
        Runs at first start and then at the beginning of each hour.
        :return:
        """
        # Balance
        market = self.markets[0]  # Assume only 1 TNS market per node
        market.balance(self)
        self.campus.model.prep_transactive_signal(market, self)
        self.campus.model.send_transactive_signal(
            self, self.city_supply_topic, start_of_cycle=start_of_cycle)

        # Schedule to run next hour with start_of_cycle = True
        cur_exp_time = parser.parse(cur_exp_time)
        cur_analysis_time = parser.parse(cur_analysis_time)
        next_exp_time, next_analysis_time = self.get_next_exp_time(
            cur_exp_time, cur_analysis_time)
        self.core.schedule(next_exp_time,
                           self.schedule_run,
                           format_timestamp(next_exp_time),
                           format_timestamp(next_analysis_time),
                           start_of_cycle=True)
Example #7
    def publish_target_info_pgne(self, cur_analysis_time_utc):
        cur_analysis_time_utc = parser.parse(cur_analysis_time_utc)

        target_messages = self.get_target_info_pgne(format_timestamp(cur_analysis_time_utc), 'UTC')
        if len(target_messages) > 0:

            target_topic = '/'.join(['analysis', 'target_agent', self.site, self.building, 'goal'])
            for target_message in target_messages:
                headers = {'Date': format_timestamp(get_aware_utc_now())}
                self.vip.pubsub.publish(
                    'pubsub', target_topic, headers, target_message).get(timeout=15)
                _log.debug("TargetAgent {topic}: {value}".format(
                    topic=target_topic,
                    value=target_message))
                gevent.sleep(2)

        # Schedule next run at min 30 of next hour only if current min >= 30
        one_hour = timedelta(hours=1)
        cur_min = cur_analysis_time_utc.minute
        next_analysis_time = cur_analysis_time_utc.replace(minute=30,
                                                           second=0,
                                                           microsecond=0)
        if cur_min >= 30:
            next_analysis_time += one_hour

        next_run_time = next_analysis_time
        if self.dr_mode == 'dev':
            next_run_time = get_aware_utc_now() + timedelta(seconds=15)

        if self.dr_mode != 'manual':
            self.core.schedule(next_run_time, self.publish_target_info,
                               format_timestamp(next_analysis_time))
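The next-run arithmetic above is easy to verify in isolation: the run is pinned to minute 30, rolling over to the next hour once the current minute reaches 30.

from datetime import datetime, timedelta

def next_half_hour_mark(cur):
    nxt = cur.replace(minute=30, second=0, microsecond=0)
    if cur.minute >= 30:
        nxt += timedelta(hours=1)
    return nxt

assert next_half_hour_mark(datetime(2020, 1, 1, 10, 10)) == datetime(2020, 1, 1, 10, 30)
assert next_half_hour_mark(datetime(2020, 1, 1, 10, 45)) == datetime(2020, 1, 1, 11, 30)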
Example #8
 def on_ilc_start(self, peer, sender, bus, topic, headers, message):
     cur_time = self.local_tz.localize(datetime.now())
     cur_time_utc = cur_time.astimezone(pytz.utc)
     one_hour = timedelta(hours=1)
     prev_time_utc = cur_time_utc - one_hour
     self.publish_target_info(format_timestamp(prev_time_utc))
     self.publish_target_info(format_timestamp(cur_time_utc))
Example #9
    def get_forecast_weatherservice(self, mkt):
        """
        Uses the VOLTTRON DarkSky weather agent, running on the local or a
        remote platform, to get a 24-hour weather forecast.
        :param mkt:
        :return:
        """
        weather_results = None
        weather_data = None
        try:
            result = self.parent.vip.rpc.call(self.weather_vip,
                                              "get_hourly_forecast",
                                              self.location,
                                              external_platform=self.remote_platform).get(timeout=15)
            weather_results = result[0]["weather_results"]

        except (gevent.Timeout, RemoteError) as ex:
            _log.warning("RPC call to {} failed for weather forecast: {}".format(self.weather_vip, ex))

        if weather_results is not None:
            try:
                weather_data = [[parser.parse(oat[0]).astimezone(self.localtz), oat[1][self.oat_point_name]] for oat in weather_results]
                weather_data = [[oat[0].replace(tzinfo=None), oat[1]] for oat in weather_data]
            except KeyError:
                if not self.predictedValues:
                    raise Exception("Measurement Point Name is not correct")

            # How do we deal with never getting weather information?  Exit?
            except Exception as ex:
                if not self.predictedValues:
                    raise Exception("Exception {} processing weather data.".format(ex))

        # Copy weather data to predictedValues
        if weather_data is not None:
            self.predictedValues = []
            for ti in mkt.timeIntervals:
                # Find item which has the same timestamp as ti.timeStamp
                start_time = ti.startTime.replace(minute=0)
                items = [x[1] for x in weather_data if x[0] == start_time]

                # Create interval value and add it to predicted values
                if items:
                    temp = items[0]
                    interval_value = IntervalValue(self, ti, mkt, MeasurementType.PredictedValue, temp)
                    self.predictedValues.append(interval_value)
        elif self.predictedValues:
            hour_gap = mkt.timeIntervals[0].startTime - self.predictedValues[0].timeInterval.startTime
            max_hour_gap = timedelta(hours=4)
            if hour_gap > max_hour_gap:
                self.predictedValues = []
                raise Exception('No weather data for time: {}'.format(utils.format_timestamp(mkt.timeIntervals[0].startTime)))
            else:
                predictedValues = []
                for i in range(1, len(mkt.timeIntervals)):
                    interval_value = IntervalValue(self, mkt.timeIntervals[i], mkt, MeasurementType.PredictedValue, self.predictedValues[i-1].value)
                    predictedValues.append(interval_value)
                self.predictedValues = predictedValues
        else:
            raise Exception(
                'No weather data for time: {}'.format(utils.format_timestamp(mkt.timeIntervals[0].startTime)))
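The alignment step above matches hourly forecast rows to market interval start times by exact timestamp; distilled to plain Python:

from datetime import datetime

def match_intervals(weather_data, interval_starts):
    # weather_data: [[naive_datetime, value], ...], one row per hour.
    matched = {}
    for start in interval_starts:
        start = start.replace(minute=0)
        items = [value for ts, value in weather_data if ts == start]
        if items:
            matched[start] = items[0]
    return matched

rows = [[datetime(2020, 1, 1, h), 20.0 + h] for h in range(24)]
print(match_intervals(rows, [datetime(2020, 1, 1, 5, 15)]))  # {datetime(2020, 1, 1, 5, 0): 25.0}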
Example #10
def test_record_topic(publish_agent, query_agent):
    """
    Test if record topic message is getting forwarded to historian running on
    another instance.

    :param publish_agent: Fake agent used to publish messages to the bus in
    volttron_instance1. Calling this fixture makes sure all the dependent
    fixtures are called to set up and start volttron_instance1 and the
    forwarder agent, and returns the instance of the fake agent to publish.

    :param query_agent: Fake agent used to query the sqlhistorian in
    volttron_instance2. Calling this fixture makes sure all the dependent
    fixtures are called to set up and start volttron_instance2 and the
    sqlhistorian agent, and returns the instance of a fake agent to query
    the historian.
    # Create timestamp
    print("\n** test_record_topic **")
    now = utils.format_timestamp(datetime.utcnow())
    print("now is ", now)
    headers = {
        headers_mod.DATE: now,
        headers_mod.TIMESTAMP: now
    }
    # Publish messages
    publish(publish_agent, topics.RECORD, headers, 1)

    # sleep so that records get inserted with unique timestamps
    gevent.sleep(0.5)
    time2 = utils.format_timestamp(datetime.utcnow())
    headers = {
        headers_mod.DATE: time2,
        headers_mod.TIMESTAMP: time2
    }
    publish(publish_agent, topics.RECORD, headers, 'value0')
    # sleep so that records get inserted with unique timestamps
    gevent.sleep(0.5)
    time3 = utils.format_timestamp(datetime.utcnow())
    headers = {
        headers_mod.DATE: time3,
        headers_mod.TIMESTAMP: time3
    }
    publish(publish_agent, topics.RECORD, headers, {'key': 'value'})
    gevent.sleep(0.5)
    result = query_agent.vip.rpc.call('platform.historian',
                                      'query',
                                      topic=topics.RECORD,
                                      start=now,
                                      order="FIRST_TO_LAST").get(timeout=10)
    print('Query Result', result)
    assert (len(result['values']) == 3)
    assert (result['values'][0][1] == 1)
    assert (result['values'][1][1] == 'value0')
    assert (result['values'][2][1] == {'key': 'value'})
    assert result['values'][2][0] == time3 + '+00:00'
Example #11
def validate_cache_result_forecast(locations, api_result, cache_result):
    for result in api_result:
        time_in_results = False
        for cr in cache_result:
            if utils.format_timestamp(cr[2]) == result["generation_time"]:
                for record in result["weather_results"]:
                    if utils.format_timestamp(cr[3]).startswith(record[0]):
                        time_in_results = True
                        assert jsonapi.loads(cr[1]) in locations
                        assert record[1] == jsonapi.loads(cr[4])
                        break
        assert time_in_results
Example #12
def validate_cache_result_forecast(locations, api_result, cache_result):
    for result in api_result:
        time_in_results = False
        for cr in cache_result:
            if utils.format_timestamp(cr[2]) == result["generation_time"]:
                for record in result["weather_results"]:
                    if utils.format_timestamp(cr[3]).startswith(record[0]):
                        time_in_results = True
                        assert ujson.loads(cr[1]) in locations
                        assert record[1] == ujson.loads(cr[4])
                        break
        assert time_in_results
Example #13
    def train_components(self, now=None):
        """Gather training parameters, query historian for training data,
        then pass data to model for further processing

        :param now: timestamp for simulation mode
        """
        if self.next_training is None:
            return

        if now is None:
            # We are being driven by a greenlet in realtime
            # Always run when we are told
            now = utils.get_aware_utc_now()
        else:
            # We are being driven by simulation
            # Return if we are not ready to run again
            if self.next_training > now:
                return

        # Train both forecast and component models
        for forecast_models in (False, True):
            results = {}
            all_parameters = self.model.get_training_parameters(
                forecast_models)

            for name, parameters in all_parameters.items():
                window, sources = parameters
                end = now
                start = end - timedelta(days=window)
                training_data = {}
                for topic in sources:
                    value = self.vip.rpc.call(
                        self.historian_vip_id,
                        "query",
                        topic,
                        utils.format_timestamp(start),
                        utils.format_timestamp(end),
                    ).get(timeout=4)
                    training_data[topic] = value

                results[name] = training_data

            self.model.apply_all_training_data(results, forecast_models)

        try:
            self.next_training = next(self.training_schedule)
            LOG.info("Next training scheduled for {}"
                     "".format(self.next_training))
        except StopIteration:
            self.next_training = None
            self.historian_training = False
            LOG.info("No more trainings scheduled")
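training_schedule is consumed with next(...), so it is presumably an iterator of datetimes; one hypothetical way to build such a schedule (an assumption for illustration, not the project's actual code):

from datetime import timedelta

def make_training_schedule(start, interval_days, count):
    # Yield `count` datetimes, `interval_days` apart; when exhausted,
    # next() raises StopIteration, exactly as the except branch above expects.
    when = start
    for _ in range(count):
        yield when
        when += timedelta(days=interval_days)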
Example #14
        def actuator_request(self, site, command_equip):
            """
            Calls the actuator"s request_new_schedule method to get
                    device schedule
            :param command_equip: contains the names of the devices
                that will be scheduled with the ActuatorAgent.
            :type: dict or list
            :returns: Return result from request_new_schedule method
                and True or False for error in scheduling device.
            :rtype: boolean
            :Return Values:

                request_error = True/False

            warning:: Calling without previously scheduling a device and not within
                         the time allotted will raise a LockError"""

            _now = get_aware_utc_now()
            str_now = format_timestamp(_now)
            _end = _now + td(minutes=self.device_lock_duration)
            str_end = format_timestamp(_end)
            for device in command_equip:
                actuation_device = site['base_actuator_path'](unit=device,
                                                              point="")
                schedule_request = [[actuation_device, str_now, str_end]]
                try:
                    _log.info("Make Request {} for start {} and end {}".format(
                        actuation_device, str_now, str_end))
                    result = self.actuation_vip.call(
                        "platform.actuator", "request_new_schedule", "rcx",
                        actuation_device, "HIGH",
                        schedule_request).get(timeout=15)
                except RemoteError as ex:
                    _log.warning(
                        "Failed to schedule device {} (RemoteError): {}".
                        format(device, str(ex)))
                    request_error = True
                    # Without this continue, `result` would be unbound below.
                    continue
                if result["result"] == "FAILURE":
                    if result["info"] == "TASK_ID_ALREADY_EXISTS":
                        _log.info("Task to schedule device already exists " +
                                  device)
                        request_error = False
                    else:
                        _log.warning(
                            "Failed to schedule device (unavailable) " +
                            device)
                        request_error = True
                else:
                    request_error = False

            return request_error
    def report(self):
        """ Report result of diagnostic analysis and publish
        to the VOLTTRON message bus.

        :return: None
        """
        # Multiple control steps and analyses can occur for each diagnostic;
        # self.fault_condition ("any" or "all") controls how their results
        # are combined below.
        self.headers = {
            "Date": format_timestamp(get_aware_utc_now()),
            "Timestamp": format_timestamp(get_aware_utc_now())
        }
        analysis = {}
        if -1 in self.evaluations:
            LOG.debug("Diagnostic %s produced an inconclusive result",
                      self.name)
            analysis = {"result": -1}
            for publish_topic in self.analysis_topic:
                self.vip.pubsub.publish("pubsub",
                                        publish_topic,
                                        headers=self.headers,
                                        message=analysis)
            self.evaluations = []
            return

        # If self.fault_condition == "any", any step where a fault condition
        # is detected leads to reporting a fault.
        if self.fault_condition == "any":
            if True in self.evaluations:
                LOG.debug("%s - fault detected", self.name)
                analysis = {"result": self.fault_code}
            else:
                LOG.debug("%s - no fault detected", self.name)
                analysis = {"result": self.non_fault_code}
        # If self.fault_condition == "all", every step must have a fault
        # condition for a fault to be reported.
        else:
            if False in self.evaluations:
                LOG.debug("%s - no fault detected", self.name)
                analysis = {"result": self.non_fault_code}
            else:
                LOG.debug("%s - fault detected", self.name)
                analysis = {"result": self.fault_code}

        # Reinitialize evaluations list for use in next diagnostic run.
        for publish_topic in self.analysis_topic:
            self.vip.pubsub.publish("pubsub",
                                    publish_topic,
                                    headers=self.headers,
                                    message=analysis)
        self.evaluations = []
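The "any"/"all" combination logic reduces to a small pure function, which makes the convention easy to unit-test (a sketch):

def combine_evaluations(evaluations, fault_condition, fault_code, non_fault_code):
    # Pure-function version of the branching in report() above.
    if -1 in evaluations:
        return -1  # inconclusive
    if fault_condition == "any":
        # Any single faulted step reports a fault.
        return fault_code if True in evaluations else non_fault_code
    # "all": every step must have faulted for a fault to be reported.
    return non_fault_code if False in evaluations else fault_code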
Example #16
        def actuator_request(self, score_order):
            '''request access to devices.'''
            _now = get_aware_utc_now()
            str_now = format_timestamp(_now)
            _end = _now + longest_possible_curtail + actuator_schedule_buffer
            str_end = format_timestamp(_end)
            ctrl_dev = []

            already_handled = dict(
                (device, True) for device in self.scheduled_devices)

            for item in score_order:

                device, point = item

                _log.debug('Reserving device: ' + device)

                if device in already_handled:
                    if already_handled[device]:
                        _log.debug(
                            'Skipping reserve device (previously reserved): ' +
                            device)
                        ctrl_dev.append(item)
                    continue

                curtailed_device = base_rpc_path(unit=device, point='')
                schedule_request = [[curtailed_device, str_now, str_end]]
                try:
                    if self.kill_signal_recieved:
                        break
                    result = self.vip.rpc.call('platform.actuator',
                                               'request_new_schedule',
                                               agent_id, device, 'HIGH',
                                               schedule_request).get(timeout=5)
                except RemoteError as ex:
                    _log.warning(
                        'Failed to schedule device {} (RemoteError): {}'.
                        format(device, str(ex)))
                    continue

                if result['result'] == 'FAILURE':
                    _log.warning('Failed to schedule device (unavailable) ' +
                                 device)
                    already_handled[device] = False
                else:
                    already_handled[device] = True
                    self.scheduled_devices.add(device)
                    ctrl_dev.append(item)

            return ctrl_dev
Example #17
    def update_status(self, status, context=None):
        """
        Updates the internal state of the `Status` object.

        This method will throw errors if the context is not serializable or
        if the status parameter is not within the ACCEPTABLE_STATUS tuple.

        :param status:
        :param context:
        :return:
        """
        if status not in ACCEPTABLE_STATUS:
            raise ValueError('Invalid status value {}'.format(status))
        try:
            jsonapi.dumps(context)
        except TypeError:
            raise ValueError('Context must be JSON serializable.')

        status_changed = status != self._status
        self._status = status
        self._context = context
        self._last_updated = format_timestamp(get_aware_utc_now())

        if status_changed and self._status_changed_callback:
            print(self._status_changed_callback())
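The JSON-serializability guard in update_status() is a reusable pattern; a stand-alone sketch using the standard-library json module (VOLTTRON's jsonapi exposes the same dumps interface):

import json

def require_json_serializable(context):
    try:
        json.dumps(context)
    except TypeError:
        raise ValueError("Context must be JSON serializable.")

require_json_serializable({"detail": "ok"})        # passes silently
# require_json_serializable({"bad": object()})     # would raise ValueError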
 def periodic_read(self, now):
     # Schedule the next read at an absolute time (rather than using a
     # periodic call) to prevent drift.
     next_scrape_time = now + datetime.timedelta(seconds=self.interval)
     # Sanity check now.
     # This is specifically for when this is running in a VM that gets
     # suspended and then resumed. If we don't make this check, a resumed
     # VM will publish one event per minute of time the VM was suspended.
     test_now = utils.get_aware_utc_now()
     if test_now - next_scrape_time > datetime.timedelta(seconds=self.interval):
         next_scrape_time = self.find_starting_datetime(test_now)
     self.periodic_read_event = self.core.schedule(next_scrape_time, self.periodic_read, next_scrape_time)
     _log.debug("scraping device: " + self.device_name)
     try:
         results = self.interface.scrape_all()
     except Exception as ex:
         _log.error('Failed to scrape ' + self.device_name + ': ' + str(ex))
         return
     if results:
         utcnow_string = utils.format_timestamp(utils.get_aware_utc_now())
         headers = {headers_mod.DATE: utcnow_string,
                    headers_mod.TIMESTAMP: utcnow_string, }
         for point, value in results.items():
             depth_first_topic, breadth_first_topic = self.get_paths_for_point(point)
             message = [value, self.meta_data[point]]
             self._publish_wrapper(depth_first_topic, headers=headers, message=message)
             self._publish_wrapper(breadth_first_topic, headers=headers, message=message)
         message = [results, self.meta_data]
         self._publish_wrapper(self.all_path_depth, headers=headers, message=message)
         self._publish_wrapper(self.all_path_breadth, headers=headers, message=message)
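The rescheduling pattern above avoids drift by deriving each next run from the previous target time rather than from the current time, while the sanity check caps how far behind the chain may fall after a VM suspend. Distilled:

from datetime import datetime, timedelta, timezone

def next_scrape(previous_target, interval_seconds):
    target = previous_target + timedelta(seconds=interval_seconds)
    now = datetime.now(timezone.utc)
    if now - target > timedelta(seconds=interval_seconds):
        # The host was likely suspended; jump ahead instead of publishing
        # one catch-up event per missed interval. (A real implementation
        # would re-align to the scrape grid, as find_starting_datetime does.)
        target = now
    return target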
    def new_data(self, peer, sender, bus, topic, headers, message):
        """
        Call back method for curtailable device data subscription.
        :param peer:
        :param sender:
        :param bus:
        :param topic:
        :param headers:
        :param message:
        :return:
        """
        if self.kill_signal_received:
            return

        _log.info("Data Received for {}".format(topic))
        # topic of form:  devices/campus/building/device
        device_name = self.device_topic_map[topic]
        data = message[0]
        meta = message[1]
        now = parser.parse(headers["Date"])
        current_time_str = format_timestamp(now)
        parsed_data = parse_sympy(data)

        subdevices = self.curtailment.get_device(device_name).command_status.keys()
        for subdevice in subdevices:
            status = self.curtailment.get_device(device_name).currently_curtailed[subdevice]
            _log.debug("Device: {} -- subdevice: {} -- status: {}".format(device_name, subdevice, status))
            self.criteria.get_device(device_name[0]).criteria_status(subdevice, status)

        self.criteria.get_device(device_name[0]).ingest_data(now, parsed_data)
        self.curtailment.get_device(device_name).ingest_data(parsed_data)
        self.create_device_status_publish(current_time_str, device_name, data, topic, meta)
    def check_load(self, bldg_power, current_time):
        """
        Check whole-building power; if the value is above the
        demand limit (demand_limit), initiate the ILC (AHP)
        sequence.
        :param bldg_power:
        :param current_time:
        :return:
        """
        _log.debug("Checking building load.")

        if self.demand_limit is None:
            result = "Demand goal has not been set. Current load: ({load}) kW.".format(load=bldg_power)
        else:
            result = "Current load: ({load}) kW is below demand limit of {limit} kW.".format(load=bldg_power,
                                                                                             limit=self.demand_limit)

        if self.demand_limit is not None and bldg_power > self.demand_limit:
            result = "Current load of {} kW exceeds demand limit of {} kW.".format(bldg_power, self.demand_limit)
            scored_devices = self.criteria.get_score_order()
            on_devices = self.curtailment.get_on_devices()
            score_order = [device for scored in scored_devices for device in on_devices if scored in [(device[0], device[1])]]

            _log.debug("Scored devices: {}".format(scored_devices))
            _log.debug("On devices: {}".format(on_devices))
            _log.debug("Scored and on devices: {}".format(score_order))

            if not score_order:
                _log.info("All devices are off, nothing to curtail.")
                return

            self.device_group_size = None
            scored_devices = self.actuator_request(score_order)
            self.curtail(scored_devices, bldg_power, current_time)
        self.create_application_status(format_timestamp(current_time), result)
Example #21
    def on_polling(self):
        if self.zip is None and (self.region is None or self.city is None):
            return

        kwargs = {}
        if self.zip is not None:
            kwargs['zip'] = self.zip
            topic = 'weather2/polling/current/ZIP/{zip}/all'.format(zip=self.zip)
        else:
            kwargs['region'] = self.region
            kwargs['city'] = self.city
            topic = 'weather2/polling/current/{region}/{city}/all'.format(
                region=self.region,
                city=self.city
            )
        wu_resp = self.wu_service.current(**kwargs)
        publish_items = self.build_resp_current(wu_resp)

        if len(publish_items) > 0:
            headers = {
                HEADER_NAME_DATE: format_timestamp(utils.get_aware_utc_now()),
                HEADER_NAME_CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON
            }
            self.vip.pubsub.publish(peer='pubsub',
                                    topic=topic,
                                    message=publish_items,
                                    headers=headers)
            _log.debug(publish_items)
Example #22
    def _on_platform_log_message(self, peer, sender, bus, topic, headers,
                                 message):
        """ Receive message from a registered platform

        This method is called with stats from the registered platform agents.

        """
        _log.debug('Got topic: {}'.format(topic))
        _log.debug('Got message: {}'.format(message))

        topicsplit = topic.split('/')
        platform_uuid = topicsplit[2]

        # For devices we use everything between devices/../all as a unique
        # key for determining the last time it was seen.
        key = '/'.join(topicsplit[:])
        _log.debug("key is: {}".format(key))
        point_list = []

        for point, item in message.items():
            point_list.append(point)

        stats = {
            'topic': key,
            'points': point_list,
            'last_published_utc': format_timestamp(get_aware_utc_now())
        }

        self._registry.update_performance(platform_uuid=platform_uuid,
                                          performance=stats)
Example #23
 def publish_all(self, observation, topic_prefix="weather", headers=None):
     # Avoid a shared mutable default; copy so the caller's dict isn't mutated.
     headers = dict(headers or {})
     utcnow = utils.get_aware_utc_now()
     utcnow_string = utils.format_timestamp(utcnow)
     headers.update({HEADER_NAME_DATE: utcnow_string,
                     headers_mod.TIMESTAMP: utcnow_string})
     self.publish_subtopic(self.build_dictionary(observation),
                           topic_prefix, headers)
Example #24
    def read_callback(self, results):
        # XXX: Does a warning need to be printed?
        if results is None:
            return

        now = utils.format_timestamp(datetime.utcnow())
        
        headers = {
            headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON,
            headers_mod.DATE: now,
            headers_mod.TIMESTAMP: now
        }
         
        for point, value in results.items():
            if isinstance(value, bool):
                value = int(value)
            self.add('/'+point, value)
            
        try:
            for point, value in results.items():
                if isinstance(value, bool):
                    value = int(value)
                depth, breadth = self.get_paths_for_point('/'+point)
                self.publish_json(depth, headers, value, self.meta_data[point], flags=zmq.NOBLOCK)
                self.publish_json(breadth, headers, value, self.meta_data[point], flags=zmq.NOBLOCK)
                
            self.publish_json(self.all_path_depth, headers, results, self.meta_data, flags=zmq.NOBLOCK)
            self.publish_json(self.all_path_breadth, headers, results, self.meta_data, flags=zmq.NOBLOCK)
        except zmq.error.Again:
            print("Warning: platform not running, topics not published. (Data to smap historian is unaffected by this warning)")
Example #25
 def clear_market(self):
     price = None
     quantity = None
     error_code = None
     error_message = None
     aux = {}
     if (self.state in [ACCEPT_ALL_OFFERS, ACCEPT_BUY_OFFERS, ACCEPT_SELL_OFFERS]):
         error_code = SHORT_OFFERS
          error_message = 'The market {} failed to receive all the expected offers. The state is {}.'.format(self.market_name, self.state)
     elif (self.state != MARKET_DONE):
         error_code = BAD_STATE
         error_message = 'Programming error in Market class. State of {} and clear market signal arrived. This represents a logic error.'.format(self.state)
     else:
         if not self.has_market_formed():
             error_code = NOT_FORMED
             error_message = 'The market {} has not received a buy and a sell reservation.'.format(self.market_name)
         else:
             quantity, price, aux = self.offers.settle()
             if price is None:
                 error_code = NO_INTERSECT
                 error_message = "Error: The supply and demand curves do not intersect. The market {} failed to clear.".format(self.market_name)
     _log.info("Clearing price for Market: {} Price: {} Qty: {}".format(self.market_name, price, quantity))
     timestamp = self._get_time()
     timestamp_string = utils.format_timestamp(timestamp)
     self.publish(peer='pubsub',
                  topic=MARKET_CLEAR,
                  message=[timestamp_string, self.market_name, quantity, price])
     self.publish(peer='pubsub',
                  topic=MARKET_RECORD,
                  message=[timestamp_string, self.market_name, quantity, price])
     if error_message is not None:
         self.publish(peer='pubsub',
                      topic=MARKET_ERROR,
                      message=[timestamp_string, self.market_name, error_code, error_message, aux])
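The clearing publish carries a flat list; a subscriber-side sketch that unpacks it, using the message layout from clear_market() above:

def on_market_clear(peer, sender, bus, topic, headers, message):
    # [timestamp_string, market_name, quantity, price], as published above.
    timestamp_string, market_name, quantity, price = message
    if price is None:
        print("{}: market {} failed to clear".format(timestamp_string, market_name))
    else:
        print("{}: market {} cleared qty {} at price {}".format(
            timestamp_string, market_name, quantity, price))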
Example #26
    def _on_device_message(self, peer, sender, bus, topic, headers, message):
        # only deal with agents that have not been forwarded.
        if headers.get('X-Forwarded', None):
            return

        # only listen to the ending all message.
        if not re.match('.*/all$', topic):
            return

        topicsplit = topic.split('/')

        # For devices we use everything between devices/../all as a unique
        # key for determining the last time it was seen.
        key = '/'.join(topicsplit[1: -1])

        anon_topic = self._topic_replace_map[key]

        if not anon_topic:
            anon_topic = key

            for sr in self._topic_replace_list:
                _log.debug(
                    'anon replacing {}->{}'.format(sr['from'], sr['to']))
                anon_topic = anon_topic.replace(sr['from'],
                                                sr['to'])
            _log.debug('anon after replacing {}'.format(anon_topic))
            _log.debug('Anon topic is: {}'.format(anon_topic))
            self._topic_replace_map[key] = anon_topic
        _log.debug('DEVICES ON PLATFORM ARE: {}'.format(self._devices))
        self._devices[anon_topic] = {
            'points': message[0].keys(),
            'last_published_utc': format_timestamp(get_aware_utc_now())
        }
Example #29
 def update_values(self, data, topic_name, start, end, values):
     if start.tzinfo:
         data[0] = data[0].replace(tzinfo=tzutc())
     if data[0] >= start and data[0] < end:
         result_value = self.json_string_to_dict(data[1])
         values[topic_name].append(
             (utils.format_timestamp(data[0]), result_value))
Example #30
    def on_heartbeat_topic(self, peer, sender, bus, topic, headers, message):
        global counter
        # Test various RPC calls to the Chargepoint driver.
        counter += 1
        if counter > 1:
            # result = self.set_chargepoint_point('shedState', 0)
            # result = self.get_chargepoint_point('stationMacAddr')
            # result = self.get_chargepoint_point('Lat')
            # result = self.get_chargepoint_point('Long')
            # result = self.set_chargepoint_point('allowedLoad', 10)
            # result = self.set_chargepoint_point('percentShed', 50)
            # result = self.set_chargepoint_point('clearAlarms', True)
            # result = self.get_chargepoint_point('alarmType')
            # result = self.get_chargepoint_point('sessionID')
            result = self.get_chargepoint_point('Status')
            # result = self.get_chargepoint_point('stationRightsProfile')

            now = utils.format_timestamp(datetime.datetime.now())
            # Also publish a test pub/sub message just for kicks.
            result = self.publish_message('test_topic/test_subtopic',
                                          {
                                              headers_mod.DATE: now,
                                              headers_mod.TIMESTAMP: now
                                          },
                                          [{'property_1': 1, 'property_2': 2}, {'property_3': 3, 'property_4': 4}])

            counter = 0
Example #31
    def publish_baseline(self, df, cur_time):
        """
        This method is obsolete. Keep here for reference only.
        """
        topic_tmpl = "analysis/PGnE/{campus}/{building}/"
        topic_prefix = topic_tmpl.format(campus=self.site,
                                         building=self.building)
        headers = {'Date': format_timestamp(cur_time)}
        last_idx = len(df.index)-1
        sec_last_idx = last_idx - 1
        #avg 10 day
        topic1 = topic_prefix + "avg10"
        value1 = df['pow_avg'][last_idx]
        #adj avg 10 day
        topic2 = topic_prefix + "adj_avg10"
        value_hr1 = df['pow_adj_avg'][sec_last_idx]
        value_hr2 = df['pow_adj_avg'][last_idx]
        #avg 5 hottest in 10 day
        topic3 = topic_prefix + "hot5_avg10"
        value3 = df['hot5_pow_avg'][last_idx]
        #adj avg 5 hottest in 10 day
        topic4 = topic_prefix + "hot5_adj_avg10"
        value4 = df['hot5_pow_adj_avg'][last_idx]

        #publish to message bus: only 10 day adjustment
        meta = {'type': 'float', 'tz': self.tz, 'units': 'kW'}
        msg = [{
            "value_hr1": value_hr1,
            "value_hr2": value_hr2
        }, {
            "value_hr1": meta,
            "value_hr2": meta
        }]
        self.vip.pubsub.publish(
            'pubsub', topic2, headers, msg).get(timeout=10)
Example #32
    def _set_override_off(self, pattern):
        pattern = pattern.lower()
        # If pattern exactly matches
        if pattern in self._override_patterns:
            self._override_patterns.discard(pattern)
            # Cancel any pending override events
            self._cancel_override_events(pattern)
            self._override_devices.clear()
            patterns = dict()
            # Build override devices list again
            for pat in self._override_patterns:
                for device in self.instances:
                    device = device.lower()
                    if fnmatch.fnmatch(device, pat):
                        self._override_devices.add(device)

                if self._override_interval_events[pat] is None:
                    patterns[pat] = str(0.0)
                else:
                    evt, end_time = self._override_interval_events[pat]
                    patterns[pat] = utils.format_timestamp(end_time)

            self.vip.config.set("override_patterns", jsonapi.dumps(patterns))
        else:
            _log.error("Override Pattern did not match!")
            raise OverrideError(
                "Pattern {} does not exist in list of override patterns".
                format(pattern))
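The override patterns are ordinary fnmatch globs, matched case-insensitively by lower-casing both sides; a quick standard-library demonstration with made-up device topics:

import fnmatch

devices = ["campus/building1/ahu1", "campus/building2/ahu1"]
pattern = "campus/building1/*"
matched = [d for d in devices if fnmatch.fnmatch(d.lower(), pattern.lower())]
print(matched)  # ['campus/building1/ahu1']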
Example #33
    def update_status(self, status, context=None):
        """
        Updates the internal state of the `Status` object.

        This method will throw errors if the context is not serializable or
        if the status parameter is not within the ACCEPTABLE_STATUS tuple.

        :param status:
        :param context:
        :return:
        """
        if status not in ACCEPTABLE_STATUS:
            raise ValueError('Invalid status value {}'.format(status))
        try:
            jsonapi.dumps(context)
        except TypeError:
            raise ValueError('Context must be JSON serializable.')

        status_changed = status != self._status
        self._status = status
        self._context = context
        self._last_updated = format_timestamp(get_aware_utc_now())

        if status_changed and self._status_changed_callback:
            self._status_changed_callback()
Example #34
    def on_heartbeat_topic(self, peer, sender, bus, topic, headers, message):
        global counter
        # Test various RPC calls to the Chargepoint driver.
        counter += 1
        if counter > 1:
            # result = self.set_chargepoint_point('shedState', 0)
            # result = self.get_chargepoint_point('stationMacAddr')
            # result = self.get_chargepoint_point('Lat')
            # result = self.get_chargepoint_point('Long')
            # result = self.set_chargepoint_point('allowedLoad', 10)
            # result = self.set_chargepoint_point('percentShed', 50)
            # result = self.set_chargepoint_point('clearAlarms', True)
            # result = self.get_chargepoint_point('alarmType')
            # result = self.get_chargepoint_point('sessionID')
            result = self.get_chargepoint_point('Status')
            # result = self.get_chargepoint_point('stationRightsProfile')

            now = utils.format_timestamp(datetime.datetime.now())
            # Also publish a test pub/sub message just for kicks.
            result = self.publish_message('test_topic/test_subtopic', {
                headers_mod.DATE: now,
                headers_mod.TIMESTAMP: now
            }, [{
                'property_1': 1,
                'property_2': 2
            }, {
                'property_3': 3,
                'property_4': 4
            }])

            counter = 0
Example #35
    def process_point(self, now, topic, min_value=None, max_value=None, output_topic=None,
                      aggregate_method=None):
        """
        This is where the magic happens.

        New or changed methods to clean/massage/introduce data go here.

        Currently this function republishes the most recent data as-is if it
        is not too old and falls within the min and max settings.

        If the most recent value is stale, it uses an average of the last 30 days.

        New keyword arguments can be added to this function as needed; they are
        passed straight through from each topic's settings in the configuration file.

        :param now:
        :param topic:
        :param min_value:
        :param max_value:
        :param output_topic:
        :param aggregate_method:
        """
        _log.debug("Processing topic: {}".format(topic))

        if output_topic is None:
            _log.error("No output topic for {}".format(topic))
            return

        # Query the data from the historian
        results = self.vip.rpc.call("platform.historian", "query", topic, "now -1d").get(timeout=5.0)

        values = results["values"]
        if not values:
            _log.error("No values for {}".format(topic))
            return

        last_timestamp, value = values[-1]
        last_timestamp = utils.parse_timestamp_string(last_timestamp)

        if now - last_timestamp > timedelta(seconds=self.period):
            _log.warning("Data used for {} is stale".format(topic))
            if aggregate_method == "avg":
                results = self.vip.rpc.call("platform.historian", "query", topic, "now -30d").get(timeout=5.0)
                values = results["values"]
                average = sum(x[1] for x in values)
                average /= len(values)
                value = average
            # Do something here to fake a better value.

        # Make sure the value is within bounds.
        if min_value is not None:
            value = max(min_value, value)

        if max_value is not None:
            value = min(max_value, value)

        # Publish the result.
        self.vip.pubsub.publish(
            "pubsub", output_topic,
            headers={headers.TIMESTAMP: utils.format_timestamp(now),
                     "source": topic},
            message=value)
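The republishing rules in process_point() reduce to two steps, a stale-data fallback and clamping; as a pure function over historian-style (timestamp, value) rows:

def massage(values, min_value=None, max_value=None, stale=False, aggregate_method=None):
    # values: list of (timestamp_string, number) rows, oldest first.
    value = values[-1][1]
    if stale and aggregate_method == "avg":
        value = sum(v for _, v in values) / len(values)
    if min_value is not None:
        value = max(min_value, value)
    if max_value is not None:
        value = min(max_value, value)
    return value

print(massage([("t1", 5), ("t2", 50)], min_value=0, max_value=40))  # prints 40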
Example #36
 def publish(self, device_topic):
     # Use an aware UTC "now"; a naive datetime.utcnow() passed to
     # astimezone() would be treated as local time and shift the result.
     headers = {'Date': utils.format_timestamp(
         utils.get_aware_utc_now().astimezone(dateutil.tz.gettz(self.timezone)))}
     message = [{
         'excess_operation': bool(self.excess_operation),
         'device_status': bool(self.device_status),
         'device_true_time': int(self.device_true_time)
     }, {
         'excess_operation': {
             'units': 'None',
             'tz': 'UTC',
             'data_type': 'bool'
         },
         'device_status': {
             'units': 'None',
             'tz': 'UTC',
             'data_type': 'bool'
         },
         'device_true_time': {
             'units': 'seconds',
             'tz': 'UTC',
             'data_type': 'integer'
         }
     }]
     device_topic = device_topic.replace("all", "report/all")
     try:
         self.vip.pubsub.publish(peer='pubsub',
                                 topic=device_topic,
                                 message=message,
                                 headers=headers)
     except Exception as e:
         _log.error("In Publish: {}".format(str(e)))
Example #37
    def _on_device_message(self, peer, sender, bus, topic, headers, message):
        # only deal with agents that have not been forwarded.
        if headers.get('X-Forwarded', None):
            return

        # only listen to the ending all message.
        if not re.match('.*/all$', topic):
            return

        topicsplit = topic.split('/')

        # For devices we use everything between devices/../all as a unique
        # key for determining the last time it was seen.
        key = '/'.join(topicsplit[1:-1])

        anon_topic = self._topic_replace_map[key]

        if not anon_topic:
            anon_topic = key

            for sr in self._topic_replace_list:
                _log.debug('anon replacing {}->{}'.format(
                    sr['from'], sr['to']))
                anon_topic = anon_topic.replace(sr['from'], sr['to'])
            _log.debug('anon after replacing {}'.format(anon_topic))
            _log.debug('Anon topic is: {}'.format(anon_topic))
            self._topic_replace_map[key] = anon_topic
        _log.debug('DEVICES ON PLATFORM ARE: {}'.format(self._devices))
        self._devices[anon_topic] = {
            'points': message[0].keys(),
            'last_published_utc': format_timestamp(get_aware_utc_now())
        }
Example #38
def publish_test_data(publish_agent, start_time, start_reading, count):
    reading = start_reading
    time = start_time
    print("publishing test data starttime is {} utcnow is {}".format(
        start_time, datetime.utcnow()))
    print("publishing test data value string {} at {}".format(
        reading, datetime.now()))

    float_meta = {'units': 'F', 'tz': 'UTC', 'type': 'float'}
    for i in range(0, count):
        #print("publishing reading {} at time {}".format(reading,
        #                                                time.isoformat()))
        # Create a message for all points.
        all_message = [{
            'in_temp': reading,
            'out_temp': reading
        }, {
            'in_temp': float_meta,
            'out_temp': float_meta
        }]
        time_str = utils.format_timestamp(time)
        headers = {headers_mod.DATE: time_str, headers_mod.TIMESTAMP: time_str}
        publish_agent.vip.pubsub.publish('pubsub',
                                         "devices/device1/all",
                                         headers=headers,
                                         message=all_message).get(timeout=10)

        reading += 1
        time = time + timedelta(seconds=30)
Example #39
 def query_current_weather(self, location):
     """
     Retrieve data from the Darksky API, return formatted current data and
     store forecast data in cache
     :param location: location dictionary requested by the user
     :return: Timestamp and data for current data from the Darksky API
     """
     darksky_response = self.get_darksky_data('get_current_weather',
                                              location)
     if 'currently' not in darksky_response:
         _log.error(
             "Current data not found in Dark Sky response: {}".format(
                 darksky_response))
     current_response = darksky_response.pop('currently')
     # Darksky required attribution
     current_response["attribution"] = "Powered by Dark Sky"
     current_time = datetime.datetime.fromtimestamp(
         current_response['time'],
         pytz.timezone(darksky_response['timezone']))
     current_time = current_time.astimezone(pytz.utc)
     if not self.performance_mode:
         # if performance mode isn't running we'll be receiving extra data
         # that we can store to help with conserving daily api calls
         for service in SERVICES_MAPPING:
             if service != 'get_current_weather' and \
                     SERVICES_MAPPING[service]['json_name'] in \
                     darksky_response:
                 service_response = darksky_response.pop(
                     SERVICES_MAPPING[service]['json_name'])
                 service_data = self.format_multientry_response(
                     location, service_response, service,
                     darksky_response['timezone'])
                 self.store_weather_records(service, service_data)
     return format_timestamp(current_time), current_response
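The epoch-to-UTC conversion above is worth seeing in isolation: fromtimestamp() with the station's pytz zone yields an aware local time, and astimezone(pytz.utc) normalizes it:

import datetime
import pytz

epoch = 1577880000  # 2020-01-01 12:00:00 UTC
local = datetime.datetime.fromtimestamp(epoch, pytz.timezone("US/Pacific"))
utc = local.astimezone(pytz.utc)
print(utc.isoformat())  # 2020-01-01T12:00:00+00:00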
Example #40
    def _update_device_state_and_schedule(self, now):
        _log.debug("_update_device_state_and_schedule")
        # Sanity check now.
        # This is specifically for when this is running in a VM that gets
        # suspended and then resumed. If we don't make this check, a resumed
        # VM will publish one event per minute of time the VM was suspended.
        test_now = datetime.datetime.now()
        if test_now - now > datetime.timedelta(minutes=3):
            now = test_now

        self._device_states = self._schedule_manager.get_schedule_state(now)
        schedule_next_event_time = self._schedule_manager.get_next_event_time(now)
        new_update_event_time = self._get_ajusted_next_event_time(now, schedule_next_event_time)

        for device, state in self._device_states.items():
            header = self._get_headers(state.agent_id, time=utils.format_timestamp(now), task_id=state.task_id)
            header['window'] = state.time_remaining
            topic = topics.ACTUATOR_SCHEDULE_ANNOUNCE_RAW.replace('{device}', device)
            self.vip.pubsub.publish('pubsub', topic, headers=header)

        if self._update_event is not None:
            # This won't hurt anything if we are canceling ourselves.
            self._update_event.cancel()
        self._update_event = self.core.schedule(new_update_event_time,
                                                self._update_schedule_state,
                                                new_update_event_time)
Example #41
    def new_supply_signal(self, peer, sender, bus, topic, headers, message):
        _log.debug("At {}, {} receives new supply records: {}".format(Timer.get_cur_time(),
                                                                     self.name, message))
        supply_curves = message['curves']
        start_of_cycle = message['start_of_cycle']

        self.campus.model.receive_transactive_signal(self, supply_curves)
        _log.debug("At {}, mixmarket state is {}, start_of_cycle {}".format(Timer.get_cur_time(),
                                                                            self.mix_market_running,
                                                                            start_of_cycle))

        db_topic = "/".join([self.db_topic, self.name, "CampusSupply"])
        message = supply_curves
        headers = {headers_mod.DATE: format_timestamp(Timer.get_cur_time())}
        self.vip.pubsub.publish("pubsub", db_topic, headers, message).get()

        if start_of_cycle:
            _log.debug("At {}, start of cycle. "
                       "Mixmarket state before overriding is {}".format(Timer.get_cur_time(),
                                                                        self.mix_market_running))

            # if self.simulation:
            #     self.run_ep_sim(start_of_cycle)
            # else:
            self.start_mixmarket(start_of_cycle)
Example #42
    def _set_override_off(self, pattern):
        pattern = pattern.lower()
        # If pattern exactly matches
        if pattern in self._override_patterns:
            self._override_patterns.discard(pattern)
            # Cancel any pending override events
            self._cancel_override_events(pattern)
            self._override_devices.clear()
            patterns = dict()
            # Build override devices list again
            for pat in self._override_patterns:
                for device in self.instances:
                    device = device.lower()
                    if fnmatch.fnmatch(device, pat):
                        self._override_devices.add(device)

                if self._override_interval_events[pat] is None:
                    patterns[pat] = str(0.0)
                else:
                    evt, end_time = self._override_interval_events[pat]
                    patterns[pat] = utils.format_timestamp(end_time)

            self.vip.config.set("override_patterns", jsonapi.dumps(patterns))
        else:
            _log.error("Override Pattern did not match!")
            raise OverrideError(
                "Pattern {} does not exist in list of override patterns".format(pattern))
Example #43
def publish_devices_fake_data(publish_agent, time=None):
    # Publish fake data. The format mimics the format used by VOLTTRON drivers.
    # Make some random readings
    global DEVICES_ALL_TOPIC
    reading = random_uniform(30, 100)
    meta = {'units': 'F', 'tz': 'UTC', 'type': 'float'}
    # Create a message for all points.
    all_message = [{
        'OutsideAirTemperature': reading,
        'MixedAirTemperature': reading,
        'DamperSignal': reading
    }, {
        'OutsideAirTemperature': meta,
        'MixedAirTemperature': meta,
        'DamperSignal': meta
    }]
    # Create timestamp
    if not time:
        time = utils.format_timestamp(datetime.utcnow())
    # now = '2015-12-02T00:00:00'
    headers = {headers_mod.DATE: time, headers_mod.TIMESTAMP: time}
    print("Published time in header: " + time)
    # Publish messages
    publish(publish_agent, DEVICES_ALL_TOPIC, headers, all_message)
    return time, reading, meta
Example #44
def test_calls_exceeded(volttron_instance, cleanup_cache, query_agent, weather):
    weather_uuid = weather[0]
    identity = weather[1]
    version = query_agent.vip.rpc.call(identity, 'get_version').get(timeout=3)
    cwd = volttron_instance.volttron_home
    database_file = "/".join([cwd, "agents", weather_uuid, "darkskyagent-" + version, "darkskyagent-" + version +
                              ".agent-data", "weather.sqlite"])
    sqlite_connection = sqlite3.connect(database_file)
    cursor = sqlite_connection.cursor()

    for i in range(0, 100):
        time = format_timestamp(get_aware_utc_now() + timedelta(seconds=i))
        insert_query = """INSERT INTO API_CALLS (CALL_TIME) VALUES (?);"""
        cursor.execute(insert_query, (time,))
    sqlite_connection.commit()

    locations = [{"lat": 39.7555, "long": -105.2211}]
    query_data = query_agent.vip.rpc.call(identity, 'get_current_weather', locations).get(timeout=30)

    assert query_data[0]['weather_error'] == 'No calls currently available for the configured API key'
    assert not query_data[0].get('weather_results')

    query_data = query_agent.vip.rpc.call(identity, 'get_hourly_forecast', locations).get(timeout=30)

    assert query_data[0]['weather_error'] == 'No calls currently available for the configured API key'
    assert not query_data[0].get('weather_results')

    delete_query = "DROP TABLE IF EXISTS API_CALLS;"
    cursor.execute(delete_query)

    create_query = """CREATE TABLE API_CALLS (CALL_TIME TIMESTAMP NOT NULL);"""
    cursor.execute(create_query)
    sqlite_connection.commit()
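The test above exhausts the agent's API-call quota by back-filling the API_CALLS table with 100 recent timestamps. A hypothetical sketch of the kind of quota check it exercises (the table name comes from the test; the limit and window are assumptions):

import sqlite3
from datetime import datetime, timedelta

def calls_available(db_path, limit=1000, window=timedelta(days=1)):
    # Count the calls recorded inside the window and compare against the quota.
    # The string comparison assumes CALL_TIME values share one ISO 8601 format.
    connection = sqlite3.connect(db_path)
    cutoff = (datetime.utcnow() - window).isoformat()
    (used,) = connection.execute(
        "SELECT COUNT(*) FROM API_CALLS WHERE CALL_TIME > ?;", (cutoff,)).fetchone()
    connection.close()
    return used < limit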
Example #45
        def publish_analysis_results(self, results):
            """
            Publish table_data in analysis results to the message bus for
                capture by the data historian.

            :param results: Results object containing commands for devices,
                    log messages and table data.
            :type results: Results object \\volttron.platform.agent.driven
            :returns: Same as results param.
            :rtype: Results object \\volttron.platform.agent.driven
            """
            to_publish = defaultdict(dict)
            for app, analysis_table in results.table_output.items():
                try:
                    name_timestamp = app.split("&")
                    timestamp = name_timestamp[1]
                except IndexError:
                    timestamp = self.received_input_datetime
                    timestamp = format_timestamp(timestamp)

                headers = {headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON, headers_mod.DATE: timestamp, }
                for entry in analysis_table:
                    for point, result in entry.items():
                        for device in command_devices:
                            publish_topic = "/".join([publish_base, device, point])
                            analysis_topic = topics.RECORD(subtopic=publish_topic)
                            to_publish[analysis_topic] = result

                for result_topic, result in to_publish.items():
                    self.vip.pubsub.publish("pubsub", result_topic, headers, result)
                to_publish.clear()
            return results
Example #46
    def publish_report(self):
        for device, _cls in self.device_dict.items():
            for sensor_topic in _cls.sensors:
                evaluation = _cls.evaluate(sensor_topic)
                if evaluation is None:
                    continue
                report_topic = _cls.report_topic[sensor_topic]
                for index, row in evaluation.iterrows():
                    ts = utils.format_timestamp(row["timestamp"])
                    fault_condition = round(row["fault"])

                    headers = {'Date': ts}
                    message = [{
                        'fault_condition': fault_condition
                    }, {
                        'fault_condition': {
                            'tz': 'UTC',
                            'data_type': 'integer'
                        }
                    }]
                    try:
                        self.vip.pubsub.publish(peer='pubsub',
                                                topic=report_topic,
                                                message=message,
                                                headers=headers)
                    except Exception as e:
                        _log.error("In Publish: {}".format(str(e)))
Example #47
def test_multi_messagebus_custom_topic_forwarder(multi_messagebus_forwarder):
    """
    Forward Historian test for custom topics with multi message bus combinations
    :return:
    """
    from_instance, to_instance = multi_messagebus_forwarder
    publish_agent = from_instance.dynamic_agent
    subscriber_agent = to_instance.dynamic_agent

    subscriber_agent.callback = MagicMock(name="callback")
    subscriber_agent.callback.reset_mock()
    subscriber_agent.vip.pubsub.subscribe(
        peer='pubsub', prefix='foo', callback=subscriber_agent.callback).get()
    #subscriber_agent.vip.pubsub.list(subscriber_agent.core.identity)
    # Create timestamp
    now = utils.format_timestamp(datetime.utcnow())
    print("now is ", now)
    headers = {headers_mod.DATE: now, headers_mod.TIMESTAMP: now}
    gevent.sleep(5)
    for i in range(0, 5):
        topic = "foo/grid_signal"
        value = 78.5 + i
        publish(publish_agent, topic, headers, value)
        gevent.sleep(0.1)
    gevent.sleep(1)
    assert subscriber_agent.callback.call_count == 5
Example #48
def main(database_name):
    db = sqlite3.connect(database_name)
    c = db.cursor()
    c.execute("select max(rowid) from data;")
    count = c.fetchone()[0]

    # Batches of 1000.
    # We do this because of a bug in the sqlite implementation in Python,
    # which causes problems with nested cursors.
    for i in range(0, count, 1000):
        c.execute(
            "select rowid, ts from data where rowid > ? order by rowid asc limit 1000;",
            (i, ))
        rows = c.fetchall()
        print("processing rowid:", i + 1, "to", i + len(rows))

        for rowid, ts in rows:
            # Skip already-converted rows.
            if "T" in ts:
                continue

            new_ts = format_timestamp(parse_timestamp_string(ts))
            c.execute("update data set ts = ? where rowid = ?;",
                      (new_ts, rowid))

        db.commit()
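main() takes the database path as its only argument; a plausible entry point, not shown in the excerpt (the script name is assumed):

if __name__ == '__main__':
    import sys
    # Assumed invocation: python convert_timestamps.py <database file>
    main(sys.argv[1])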
Example #49
 def update_values(self, data, topic_name, start, end, values):
     if start.tzinfo:
         data[0] = data[0].replace(tzinfo=tzutc())
     if data[0] >= start and data[0] < end:
         result_value = self.json_string_to_dict(data[1])
         values[topic_name].append(
             (utils.format_timestamp(data[0]), result_value))
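The replace(tzinfo=tzutc()) normalization above matters because Python refuses to compare naive and aware datetimes. A quick illustration with made-up values:

from datetime import datetime
from dateutil.tz import tzutc

naive = datetime(2020, 1, 1, 12, 0)
start = datetime(2020, 1, 1, tzinfo=tzutc())
# Comparing naive >= start raises TypeError: can't compare offset-naive and
# offset-aware datetimes; normalizing with replace() makes the check legal.
aware = naive.replace(tzinfo=tzutc())
print(aware >= start)  # True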
Example #50
    def _set_override_off(self, pattern):
        """Turn off override condition on all devices matching the pattern. It removes the pattern from the override
        patterns set, clears the list of overridden devices and reevaluates the state of devices. It then cancels the
        pending override event and removes pattern from the config store.
        :param pattern: Override pattern to be removed.
        :type pattern: str
        """

        pattern = pattern.lower()

        # If pattern exactly matches
        if pattern in self._override_patterns:
            self._override_patterns.discard(pattern)
            # Cancel any pending override events
            self._cancel_override_events(pattern)
            self._override_devices.clear()
            patterns = dict()
            # Build override devices list again
            for pat in self._override_patterns:
                for device in self.instances:
                    device = device.lower()
                    if fnmatch.fnmatch(device, pat):
                        self._override_devices.add(device)

                if self._override_interval_events[pat] is None:
                    patterns[pat] = str(0.0)
                else:
                    evt, end_time = self._override_interval_events[pat]
                    patterns[pat] = utils.format_timestamp(end_time)

            self.vip.config.set("override_patterns", jsonapi.dumps(patterns))
        else:
            _log.error("Override Pattern did not match!")
            raise OverrideError(
                "Pattern {} does not exist in list of override patterns".format(pattern))
Example #51
 def begin_collect_offers(self, timestamp):
     _log.debug("send_collect_offers_request at {}".format(timestamp))
     self.start_offers_has_markets()
     self.market_list.collect_offers()
     unformed_markets = self.market_list.unformed_market_list()
     self.vip.pubsub.publish(peer='pubsub',
                             topic=MARKET_BID,
                             message=[utils.format_timestamp(timestamp), unformed_markets])
Example #52
 def publish_weather_report(self, topic_name, parsed_weather_data):
     now = utils.format_timestamp(datetime.utcnow())
     headers = {
         headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.PLAIN_TEXT,
         headers_mod.DATE: now,
         headers_mod.TIMESTAMP: now
     }
     self.vip.pubsub.publish('pubsub', topic_name, headers, json.dumps(parsed_weather_data))
Example #53
 def send_collect_reservations_request(self, timestamp):
     _log.debug("send_collect_reservations_request at {}".format(timestamp))
     self.start_reservations()
     self.market_list.send_market_failure_errors()
     self.market_list.clear_reservations()
     self.vip.pubsub.publish(peer='pubsub',
                             topic=MARKET_RESERVE,
                             message=utils.format_timestamp(timestamp))
Example #54
    def actuator_request(self, score_order):
        """
        Request schedule to interact with devices via rpc call to actuator agent.
        :param score_order: AHP priority for devices (curtailment priority).
        :return:
        """
        current_time = get_aware_utc_now()
        start_time_str = format_timestamp(current_time)
        end_curtail_time = current_time + self.longest_possible_curtail + self.actuator_schedule_buffer
        end_time_str = format_timestamp(end_curtail_time)
        curtailable_device = []

        already_handled = dict((device[0], True) for device in self.scheduled_devices)

        for item in score_order:

            device, token, device_actuator = item

            _log.debug("Reserving device: {}".format(device))

            if device in already_handled:
                if already_handled[device]:
                    _log.debug("Skipping reserve device (previously reserved): " + device)
                    curtailable_device.append(item)
                continue

            curtailed_device = self.base_rpc_path(unit=device, point="")
            schedule_request = [[curtailed_device, start_time_str, end_time_str]]
            try:
                if self.kill_signal_received:
                    break
                result = self.vip.rpc.call(device_actuator, "request_new_schedule",
                                           self.agent_id, device, "HIGH", schedule_request).get(timeout=5)
            except RemoteError as ex:
                _log.warning("Failed to schedule device {} (RemoteError): {}".format(device, str(ex)))
                continue

            if result["result"] == "FAILURE":
                _log.warning("Failed to schedule device (unavailable) " + device)
                already_handled[device] = False
            else:
                already_handled[device] = True
                self.scheduled_devices.add((device, device_actuator))
                curtailable_device.append(item)

        return curtailable_device
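Devices reserved here accumulate in self.scheduled_devices so they can be released later. A hedged sketch of the matching release step, assuming the actuator's request_cancel_schedule RPC and recalling that actuator_request passes the device name as the task id:

    def release_devices(self):
        # Cancel the schedule for every device reserved in actuator_request.
        for device, device_actuator in list(self.scheduled_devices):
            result = self.vip.rpc.call(device_actuator, "request_cancel_schedule",
                                       self.agent_id, device).get(timeout=5)
            if result.get("result") == "SUCCESS":
                self.scheduled_devices.discard((device, device_actuator))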
Example #55
 def publish_heartbeat(self):
     now = utils.format_timestamp(datetime.utcnow())
     headers = {
         headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.PLAIN_TEXT,
         headers_mod.DATE: now,
         headers_mod.TIMESTAMP: now
     }
     result = self.vip.pubsub.publish('pubsub', 'heartbeat/NodeRedSubscriber', headers, now)
     result.get(timeout=10)
Example #56
    def publish_loop(self):
        """Publish data from file to message bus."""
        # We cannot reset the value in the config until we are in a separate greenlet.
        # We cannot call set in a config handler.

        if self._reset_playback:
            self._line_marker = 0
            if self._remember_playback:
                self.vip.config.set(LINE_MARKER_CONFIG, str(0), send_update=False)

        while True:
            current_line = -1
            for row in self._data:
                current_line += 1

                if current_line < self._line_marker:
                    continue

                self._line_marker += 1

                if self._use_timestamp and "Timestamp" in row:
                    now = row['Timestamp']
                    if not self.check_frequency(now):
                        continue
                else:
                    now = utils.format_timestamp(datetime.datetime.now())

                headers = {HEADER_NAME_DATE: now, HEADER_NAME_TIMESTAMP: now}
                row.pop('Timestamp', None)

                publish_dict = self.build_publishes(row)

                _log.debug("Publishing data for timestamp: {}".format(now))

                for topic, message in publish_dict.items():
                    self._publish_point_all(topic, message, self._meta_data, headers)

                if self._remember_playback:
                    self.vip.config.set(LINE_MARKER_CONFIG, str(self._line_marker), send_update=False)

                gevent.sleep(self._publish_interval)

            # Reset line marker.
            self._line_marker = 0
            if self._remember_playback:
                self.vip.config.set(LINE_MARKER_CONFIG, str(self._line_marker), send_update=False)
            if not self._replay_data:
                sys.exit(0)

            # Reset the csv reader if we are reading from a file.
            _log.debug("Restarting playback.")
            # Reset data frequency counter.
            self._next_allowed_publish = None
            if not isinstance(self._input_data, list):
                handle = open(self._input_data, 'r')
                self._data = csv.DictReader(handle)
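The _line_marker bookkeeping above makes playback resumable across agent restarts. The same pattern in miniature, with illustrative names:

def replay(rows, start_marker=0, save_marker=lambda n: None):
    # Skip rows that were already published, persisting progress as we go so a
    # restart can resume from the last published line.
    for marker, row in enumerate(rows):
        if marker < start_marker:
            continue
        save_marker(marker + 1)
        yield row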
Example #57
    def publish_to_historian(self, to_publish_list):

        _log.debug("publish_to_historian number of items: {}".format(
            len(to_publish_list)))

        try:
            for stored_index, row in enumerate(to_publish_list):
                ts = utils.format_timestamp(row['timestamp'])
                source = row['source']
                topic = row['topic']
                meta = row['meta']
                value = row['value']
                value_string = str(value)

                # Check type of value from metadata if it exists,
                # then cast value to that type
                try:
                    value_type = meta["type"]
                    value = influxdbutils.value_type_matching(value_type, value)
                except KeyError:
                    _log.info("Metadata doesn't include \'type\' keyword")
                except ValueError:
                    _log.warning("Metadata specifies \'type\' of value is {} while "
                                 "value={} is type {}".format(value_type, value, type(value)))

                topic_id = topic.lower()

                # If the topic is not in the list
                if topic_id not in self._topic_id_map:
                    self._topic_id_map[topic_id] = topic
                    self._meta_dicts[topic_id] = {}

                # If topic's metadata changes, update its metadata.
                if topic_id in self._topic_id_map and meta != self._meta_dicts[topic_id]:

                    _log.info("Updating meta for topic {} at {}".format(topic_id, ts))
                    self._meta_dicts[topic_id] = meta

                    # Insert the meta into the database
                    influxdbutils.insert_meta(self._client, topic_id, topic, meta, ts)
                # Else if topic name in database changes, update.
                elif topic_id in self._topic_id_map and self._topic_id_map[topic_id] != topic:
                    _log.info("Updating actual topic name {} in database for topic id {}".format(topic, topic_id))
                    self._topic_id_map[topic_id] = topic

                    # Update topic name in the database
                    influxdbutils.insert_meta(self._client, topic_id, topic, meta, ts)

                # Insert data point
                influxdbutils.insert_data_point(self._client, ts, topic_id, source, value, value_string)

            # After all data points are published
            self.report_all_handled()
            _log.info("Store ALL data in to_publish_list to InfluxDB client")

        except ConnectionError as err:
            raise err
Example #58
def publish_minute_data_for_two_hours(agent):
    now = get_aware_utc_now()
    # expectation[datetime] = {oat: value, mixed: value, damper: value}
    expectation = {}

    for h in range(2):
        data_by_time = {}

        for m in range(60):
            # Mongo timestamps only keep the first three digits after the
            # decimal (millisecond precision), so we put the random part in
            # those top three digits of the microsecond field.
            myint = random.randint(0, 999)
            mymicro = str(myint) + '000'

            now = datetime(now.year, now.month, now.day, h, m,
                           random.randint(0, 59), int(mymicro))
            # Make some random readings
            oat_reading = random.uniform(30, 100)
            mixed_reading = oat_reading + random.uniform(-5, 5)
            damper_reading = random.uniform(0, 100)

            # Create a message for all points.
            all_message = [{
                'OutsideAirTemperature': oat_reading,
                'MixedAirTemperature': mixed_reading,
                'DamperSignal': damper_reading},
                {'OutsideAirTemperature':
                     {'units': 'F', 'tz': 'UTC', 'type': 'float'},
                 'MixedAirTemperature': {'units': 'F', 'tz': 'UTC', 'type': 'float'},
                 'DamperSignal': {'units': '%', 'tz': 'UTC', 'type': 'float'}
                 }]

            now_iso_string = format_timestamp(now)
            data_by_time[now_iso_string] = {
                "oat_point": oat_reading,
                "mixed_point": mixed_reading,
                "damper_point": damper_reading
            }

            # now = '2015-12-02T00:00:00'
            headers = {
                headers_mod.DATE: now_iso_string
            }

            # Publish messages
            agent.vip.pubsub.publish(
                    'pubsub', ALL_TOPIC, headers, all_message).get(timeout=10)

            expectation[now] = {
                query_points['oat_point']: oat_reading,
                query_points['mixed_point']: mixed_reading,
                query_points['damper_point']: damper_reading
            }
    gevent.sleep(0.1)
    return expectation
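A quick check of the microsecond padding used above: with the random part capped at 999, the padded value always fits the microsecond field:

import random
from datetime import datetime

myint = random.randint(0, 999)
mymicro = int(str(myint) + '000')        # 0..999000: always a legal microsecond value
ts = datetime(2015, 12, 2, 0, 0, 0, mymicro)
print(ts.isoformat())                    # e.g. 2015-12-02T00:00:00.123000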