Example #1
    def main_run_process(self):
        """
        Main run process for RegressionAgent.  Calls data query methods
        and regression methods.  Stores each device's result.
        :return:
        """
        if self.regression_inprogress:
            return
        self.regression_inprogress = True
        self.exec_start = utils.get_aware_utc_now()
        _log.debug('Start regression - UTC converted: {}'.format(self.start))
        _log.debug('End regression UTC converted: {}'.format(self.end))

        # iterate for each device or subdevice in the device list
        for name, device in self.device_list.items():
            self.exec_start = utils.get_aware_utc_now()
            df = self.query_historian(device.input_data)
            df = self.localize_df(df, name)
            result = self.regression_list[name].regression_main(df, name)
            if result is None:
                _log.debug("ERROR for regression for %s", name)
                continue
            result = result.reset_index()
            result = result.to_dict(orient='list')
            self.coefficient_results[device.record_topic] = result
            if self.debug:
                with open('{}/{}_results.json'.format(WORKING_DIR, name), 'w+') as outfile:
                    json.dump(result, outfile, indent=4, separators=(',', ': '))
                _log.debug('*** Finished outputting coefficients ***')
            self.publish_coefficients(device.record_topic, result)
            exec_end = utils.get_aware_utc_now()
            exec_dif = exec_end - self.exec_start
            _log.debug("Regression for %s duration: %s", device, exec_dif)
        self.regression_inprogress = False
Example #2
def test_agent_logs(volttron_instance1, agent):
    """
    Test if alert agent's start and stop time are getting logged correctly
    :param volttron_instance1: instance in which alert agent is running
    :param agent: fake agent used to make rpc calls to alert agent
    """
    global alert_messages, db_connection, alert_uuid
    stop_t = get_aware_utc_now()
    volttron_instance1.stop_agent(alert_uuid)
    gevent.sleep(1)
    c = db_connection.cursor()
    c.execute("SELECT * FROM agent_log "
              "WHERE start_time IS NOT NULL AND "
              "stop_time > '{}'".format(stop_t))
    r = c.fetchall()
    assert len(r) == 1
    start_t = get_aware_utc_now()
    volttron_instance1.start_agent(alert_uuid)
    gevent.sleep(4)
    stop_t = get_aware_utc_now()
    volttron_instance1.stop_agent(alert_uuid)
    c.execute("SELECT * FROM agent_log "
              "WHERE start_time > '{}' AND "
              "stop_time > '{}'".format(start_t, stop_t))
    r = c.fetchall()
    assert len(r) == 1
    volttron_instance1.start_agent(alert_uuid)
    gevent.sleep(1)
Example #3
    def publish_target_info_pgne(self, cur_analysis_time_utc):
        cur_analysis_time_utc = parser.parse(cur_analysis_time_utc)

        target_messages = self.get_target_info_pgne(format_timestamp(cur_analysis_time_utc), 'UTC')
        if len(target_messages) > 0:

            target_topic = '/'.join(['analysis', 'target_agent', self.site, self.building, 'goal'])
            for target_message in target_messages:
                headers = {'Date': format_timestamp(get_aware_utc_now())}
                self.vip.pubsub.publish(
                    'pubsub', target_topic, headers, target_message).get(timeout=15)
                _log.debug("TargetAgent {topic}: {value}".format(
                    topic=target_topic,
                    value=target_message))
                gevent.sleep(2)

        # Schedule the next run at minute 30, pushing to the next hour if the current minute is already >= 30
        one_hour = timedelta(hours=1)
        cur_min = cur_analysis_time_utc.minute
        next_analysis_time = cur_analysis_time_utc.replace(minute=30,
                                                           second=0,
                                                           microsecond=0)
        if cur_min >= 30:
            next_analysis_time += one_hour

        next_run_time = next_analysis_time
        if self.dr_mode == 'dev':
            next_run_time = get_aware_utc_now() + timedelta(seconds=15)

        if self.dr_mode != 'manual':
            self.core.schedule(next_run_time, self.publish_target_info,
                               format_timestamp(next_analysis_time))
Example #4
    def pre_aggr(self, cur_analysis_time):
        _log.debug("AfddAggrAgent: start aggregating result...")

        # Do aggregation at the beginning of day, week, month
        if cur_analysis_time.minute == 0: # start of hour
            if cur_analysis_time.hour == 0: # start of day
                self.daily_aggr(cur_analysis_time)
                if cur_analysis_time.weekday() == 0: # start of week (Monday)
                    self.weekly_aggr(cur_analysis_time)
                if cur_analysis_time.day == 1: # start of month
                    self.monthly_aggr(cur_analysis_time)

        # Schedule for next hour
        next_analysis_time = cur_analysis_time.replace(minute=0,
                                                       second=0,
                                                       microsecond=0)
        next_analysis_time += timedelta(hours=1)
        cur_analysis_time_utc = cur_analysis_time.astimezone(pytz.utc)
        next_run_time_utc = cur_analysis_time_utc + timedelta(hours=1)

        # In manual mode, if the scheduled time is already in the past, run 60s from now
        if self.op_mode == 'manual':
            if next_run_time_utc < get_aware_utc_now():
                next_run_time_utc = get_aware_utc_now() + timedelta(seconds=60)

        self.core.schedule(next_run_time_utc, self.pre_aggr, next_analysis_time)
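
The hour-alignment arithmetic above (truncate to the top of the hour, then add one hour) is easy to check in isolation. A standalone sketch, with datetime.now(timezone.utc) standing in for get_aware_utc_now:

    from datetime import datetime, timedelta, timezone

    def next_hour(now):
        """Top of the next hour: zero out minutes/seconds, add one hour."""
        return now.replace(minute=0, second=0, microsecond=0) + timedelta(hours=1)

    # 10:17:30 -> 11:00:00
    print(next_hour(datetime(2024, 1, 1, 10, 17, 30, tzinfo=timezone.utc)))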
Example #5
 def __init__(self, request_id, report_request_id, report_specifier_id):
     self.created_on = utils.get_aware_utc_now()
     self.request_id = request_id
     self.report_request_id = report_request_id
     self.report_specifier_id = report_specifier_id
     self.status = 'inactive'
     self.last_report = utils.get_aware_utc_now()
Example #6
def test_agent_logs(volttron_instance, agent):
    """
    Test if alert agent's start and stop time are getting logged correctly
    :param volttron_instance: instance in which alert agent is running
    :param agent: fake agent used to make rpc calls to alert agent
    """
    global alert_messages, db_connection, alert_uuid
    stop_t = get_aware_utc_now()
    volttron_instance.stop_agent(alert_uuid)
    gevent.sleep(1)
    c = db_connection.cursor()
    c.execute("SELECT * FROM agent_log "
              "WHERE start_time IS NOT NULL AND "
              "stop_time > '{}'".format(stop_t))
    r = c.fetchall()
    assert len(r) == 1
    start_t = get_aware_utc_now()
    volttron_instance.start_agent(alert_uuid)
    gevent.sleep(4)
    stop_t = get_aware_utc_now()
    volttron_instance.stop_agent(alert_uuid)
    c.execute("SELECT * FROM agent_log "
              "WHERE start_time > '{}' AND "
              "stop_time > '{}'".format(start_t, stop_t))
    r = c.fetchall()
    assert len(r) == 1
    volttron_instance.start_agent(alert_uuid)
    gevent.sleep(1)
Example #7
    def start_new_cycle(self, peer, sender, bus, topic, headers, message):
        _log.debug("Trigger market period for Market agent.")
        gevent.sleep(self.reservation_delay)
        self.send_collect_reservations_request(utils.get_aware_utc_now())

        gevent.sleep(self.offer_delay)
        self.send_collect_offers_request(utils.get_aware_utc_now())
Example #8
 def periodic_read(self, now):
     # Schedule an absolute next scrape time rather than sleeping for the interval, to prevent drift.
     next_scrape_time = now + datetime.timedelta(seconds=self.interval)
     # Sanity check now. This is specifically for when this is running in
     # a VM that gets suspended and then resumed. If we don't make this
     # check, a resumed VM will publish one event per minute of time the
     # VM was suspended for.
     test_now = utils.get_aware_utc_now()
     if test_now - next_scrape_time > datetime.timedelta(seconds=self.interval):
         next_scrape_time = self.find_starting_datetime(test_now)
     self.periodic_read_event = self.core.schedule(next_scrape_time, self.periodic_read, next_scrape_time)
     _log.debug("scraping device: " + self.device_name)
     try:
         results = self.interface.scrape_all()
     except Exception as ex:
         _log.error('Failed to scrape ' + self.device_name + ': ' + str(ex))
         return
     if results:
         utcnow_string = utils.format_timestamp(utils.get_aware_utc_now())
         headers = {headers_mod.DATE: utcnow_string,
                    headers_mod.TIMESTAMP: utcnow_string, }
         for point, value in results.items():
             depth_first_topic, breadth_first_topic = self.get_paths_for_point(point)
             message = [value, self.meta_data[point]]
             self._publish_wrapper(depth_first_topic, headers=headers, message=message)
             self._publish_wrapper(breadth_first_topic, headers=headers, message=message)
         message = [results, self.meta_data]
         self._publish_wrapper(self.all_path_depth, headers=headers, message=message)
         self._publish_wrapper(self.all_path_breadth, headers=headers, message=message)
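
The scheduling detail worth copying from periodic_read: the next scrape time is derived from the nominal now passed into the callback, not from the wall clock at execution, so per-run latency never accumulates as drift; the extra check only re-anchors when the schedule has fallen more than one interval behind (the suspended-VM case). A self-contained sketch of that logic, where re-anchoring on the real clock stands in for find_starting_datetime:

    from datetime import datetime, timedelta, timezone

    def next_scrape_time(nominal_now, interval_secs):
        """Advance from the scheduled time so latency does not become drift."""
        next_time = nominal_now + timedelta(seconds=interval_secs)
        real_now = datetime.now(timezone.utc)
        # More than one interval behind (e.g. a resumed VM): re-anchor
        # instead of firing once per missed interval.
        if real_now - next_time > timedelta(seconds=interval_secs):
            next_time = real_now + timedelta(seconds=interval_secs)
        return next_time

    # e.g. next_scrape_time(datetime.now(timezone.utc), 60)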
Example #9
def test_for_duplicate_logs(volttron_instance, agent, cleanup_db):
    """
    Test if records are not getting duplicated in database after every watch
    time interval. When a topic is not seen within the configured time
    frame a single row is inserted into database for that topic. When the topic
    is seen again the same row is updated with timestamp of when the
    topic message was seen.
    :param volttron_instance: instance in which alert agent is running
    :param agent: fake agent used to make rpc calls to alert agent
    :param cleanup_db: function scope fixture to clean up alert and agent log
    tables in database.
    """
    global db_connection, alert_messages, alert_uuid
    volttron_instance.stop_agent(alert_uuid)
    gevent.sleep(1)
    start_t = get_aware_utc_now()
    volttron_instance.start_agent(alert_uuid)
    gevent.sleep(6)
    c = db_connection.cursor()
    c.execute('SELECT * FROM topic_log '
              'WHERE first_seen_after_timeout IS NULL '
              'AND last_seen_before_timeout IS NULL')
    results = c.fetchall()
    assert results is not None
    assert len(results)

    gevent.sleep(6)
    c = db_connection.cursor()
    c.execute('SELECT * FROM topic_log '
              'WHERE first_seen_after_timeout IS NULL '
              'AND last_seen_before_timeout IS NULL')
    results = c.fetchall()
    assert results is not None
    assert len(results) == 3

    publish_time = get_aware_utc_now()
    agent.vip.pubsub.publish(peer='pubsub', topic='fakedevice')
    agent.vip.pubsub.publish(peer='pubsub',
                             topic='fakedevice2/all',
                             message=[{
                                 'point': 'value'
                             }])
    gevent.sleep(2)
    c = db_connection.cursor()
    c.execute('SELECT topic, last_seen_before_timeout, '
              'first_seen_after_timeout FROM topic_log ')
    results = c.fetchall()
    assert len(results) == 3
    for r in results:
        assert r[1] is None
        naive_timestamp = publish_time.replace(tzinfo=None)
        assert r[2] >= naive_timestamp
Example #10
    def report(self):
        """ Report result of diagnostic analysis and publish
        to the VOLTTRON message bus.

        :return: None
        """
        # Multiple control steps and analyses can occur for each diagnostic.
        # If self.fault_condition == "all" then all steps must show a fault
        # condition for a fault to be reported.
        self.headers = {
            "Date": format_timestamp(get_aware_utc_now()),
            "Timestamp": format_timestamp(get_aware_utc_now())
        }
        analysis = {}
        if -1 in self.evaluations:
            LOG.debug("Diagnostic %s resulted in inconclusive result",
                      self.name)
            analysis = {"result": -1}
            for publish_topic in self.analysis_topic:
                self.vip.pubsub.publish("pubsub",
                                        self.analysis_topic,
                                        headers=self.headers,
                                        message=analysis)
            self.evaluations = []
            return

        if self.fault_condition == "any":
            if False in self.evaluations:
                LOG.debug("%s - no fault detected", self.name)
                analysis = {"result": self.non_fault_code}
            else:
                LOG.debug("%s - fault detected", self.name)
                analysis = {"result": self.fault_code}
        # Multiple control steps and analyses can occur for each diagnostic.
        # If self.fault_condition == "any" then any step where a fault
        # condition is detected will lead to reporting a fault.
        else:
            if True in self.evaluations:
                LOG.debug("%s - fault detected", self.name)
                analysis = {"result": self.fault_code}
            else:
                LOG.debug("%s - no fault detected", self.name)
                analysis = {"result": self.non_fault_code}

        for publish_topic in self.analysis_topic:
            self.vip.pubsub.publish("pubsub",
                                    publish_topic,
                                    headers=self.headers,
                                    message=analysis)
        # Reinitialize evaluations list for use in the next diagnostic run.
        self.evaluations = []
Example #11
def test_for_duplicate_logs(volttron_instance1, agent, cleanup_db):
    """
    Test if records are not getting duplicated in database after every watch
    time interval. When a topic is not seen within the configured time
    frame a single row is inserted into database for that topic. When the topic
    is seen again the same row is updated with timestamp of when the
    topic message was seen.
    :param volttron_instance1: instance in which alert agent is running
    :param agent: fake agent used to make rpc calls to alert agent
    :param cleanup_db: function scope fixture to clean up alert and agent log
    tables in database.
    """
    global db_connection, alert_messages, alert_uuid
    volttron_instance1.stop_agent(alert_uuid)
    gevent.sleep(1)
    start_t = get_aware_utc_now()
    volttron_instance1.start_agent(alert_uuid)
    gevent.sleep(6)
    c = db_connection.cursor()
    c.execute('SELECT * FROM topic_log '
              'WHERE first_seen_after_timeout IS NULL '
              'AND last_seen_before_timeout IS NULL')
    results = c.fetchall()
    assert results is not None
    assert len(results)

    gevent.sleep(6)
    c = db_connection.cursor()
    c.execute('SELECT * FROM topic_log '
              'WHERE first_seen_after_timeout IS NULL '
              'AND last_seen_before_timeout IS NULL')
    results = c.fetchall()
    assert results is not None
    assert len(results) == 3

    publish_time = get_aware_utc_now()
    agent.vip.pubsub.publish(peer='pubsub',
                             topic='fakedevice')
    agent.vip.pubsub.publish(peer='pubsub',
                             topic='fakedevice2/all',
                             message=[{'point': 'value'}])
    gevent.sleep(2)
    c = db_connection.cursor()
    c.execute('SELECT topic, last_seen_before_timeout, '
              'first_seen_after_timeout FROM topic_log ')
    results = c.fetchall()
    assert len(results) == 3
    for r in results:
        assert r[1] is None
        non_utc = publish_time.replace(tzinfo=None)
        assert r[2] >= non_utc
Example #12
def test_ignore_topic(agent):
    """
    Test ignore_topic rpc call. When a topic is ignored, it should not appear
    in future alert messages
    :param agent: fake agent used to make rpc calls to alert agent
    """
    global alert_messages, db_connection

    agent.vip.rpc.call(PLATFORM_TOPIC_WATCHER, 'ignore_topic', 'group1',
                       'fakedevice2/all').get()
    alert_messages.clear()
    publish_time = get_aware_utc_now()
    agent.vip.pubsub.publish(peer='pubsub',
                             topic='fakedevice')
    agent.vip.pubsub.publish(peer='pubsub',
                             topic='fakedevice2/all',
                             message=[{'point': 'value'}])
    print("Alert messages {}".format(alert_messages))
    gevent.sleep(7)
    assert len(alert_messages) == 1
    assert u"Topic(s) not published within time limit: ['fakedevice']" in \
           alert_messages
    c = db_connection.cursor()
    c.execute('SELECT * FROM topic_log '
              'WHERE first_seen_after_timeout is NULL '
              'AND last_seen_before_timeout > "{}"'.format(publish_time))
    results = c.fetchall()
    assert results is not None
    assert len(results) == 1
    assert results[0][0] == u'fakedevice'
    assert results[0][2] is None
Example #13
 def build_metadata_oadr_report(self, report):
     descriptions = []
     for tel_vals in json.loads(report.telemetry_parameters).values():
         # Rule 305: For TELEMETRY_USAGE reports, units in reportDescription.itemBase should be powerReal.
         if tel_vals['units'] == 'powerReal':
             item_base = oadr_20b.PowerRealType(itemDescription='RealPower',
                                                itemUnits='W',
                                                siScaleCode=None,
                                                powerAttributes=None)
         else:
             item_base = None
         min_freq, max_freq = tel_vals['min_frequency'], tel_vals['max_frequency']
         desc = oadr_20b.oadrReportDescriptionType(rID=tel_vals['r_id'],
                                                   reportType=tel_vals['report_type'],
                                                   readingType=tel_vals['reading_type'],
                                                   itemBase=item_base,
                                                   oadrSamplingRate=self.build_sampling_rate(min_freq, max_freq))
         descriptions.append(desc)
     rpt_interval_duration = isodate.duration_isoformat(timedelta(seconds=report.interval_secs))
     return oadr_20b.oadrReportType(duration=oadr_20b.DurationPropType(rpt_interval_duration),
                                    oadrReportDescription=descriptions,
                                    reportRequestID=None,
                                    reportSpecifierID=report.report_specifier_id,
                                    reportName=report.name,
                                    createdDateTime=utils.get_aware_utc_now())
Example #14
    def onstart(self, sender, **kwargs):

        _log.debug("Setting up log DB.")
        self._connection = sqlite3.connect(
            'alert_log.sqlite',
            detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)
        c = self._connection.cursor()

        c.execute("CREATE TABLE IF NOT EXISTS topic_log( "
                  "topic TEXT, "
                  "last_seen_before_timeout TIMESTAMP, "
                  "first_seen_after_timeout TIMESTAMP,"
                  "PRIMARY KEY(topic, last_seen_before_timeout))")

        c.execute("CREATE INDEX IF NOT EXISTS topic_index ON "
                  "topic_log (topic)")
        c.execute("CREATE INDEX IF NOT EXISTS down_time_index ON "
                  "topic_log (last_seen_before_timeout)")
        c.execute("CREATE INDEX IF NOT EXISTS up_time_index ON "
                  "topic_log (first_seen_after_timeout)")


        c.execute("CREATE TABLE IF NOT EXISTS agent_log ("
                  "start_time TIMESTAMP, "
                  "stop_time TIMESTAMP)")
        c.execute("CREATE INDEX IF NOT EXISTS stop_ts_index ON "
                  "agent_log (stop_time)")
        c.execute("INSERT INTO agent_log(start_time) values(?)",
                  (get_aware_utc_now(),))
        c.close()
        self._connection.commit()

        for group_name, config in self.config.items():
            self.group_agent[group_name] = self.spawn_alert_group(group_name,
                                                                  config)
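
On the detect_types flags used when opening the database: PARSE_DECLTYPES makes sqlite3 convert columns declared TIMESTAMP back into datetime objects on read. A minimal round-trip, sketched with a naive datetime because the stdlib's default converter does not parse timezone offsets:

    import sqlite3
    from datetime import datetime

    conn = sqlite3.connect(':memory:', detect_types=sqlite3.PARSE_DECLTYPES)
    c = conn.cursor()
    c.execute("CREATE TABLE agent_log (start_time TIMESTAMP, stop_time TIMESTAMP)")
    c.execute("INSERT INTO agent_log(start_time) VALUES (?)",
              (datetime(2024, 1, 1, 10, 0, 0),))
    c.execute("SELECT start_time FROM agent_log")
    print(type(c.fetchone()[0]))  # <class 'datetime.datetime'>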
Example #15
    def start_new_cycle(self, peer, sender, bus, topic, headers, message):
        _log.debug("Trigger market period for Market agent.")

        # Store received prices so we can use it later when doing clearing process
        prices = message['prices']  # Array of prices
        self.prices = prices
        self.market_list.prices = prices

        _log.debug("Clearing prices are [{prices}]".format(
            prices=str.join(',', [str(p) for p in self.prices])))

        gevent.sleep(self.reservation_delay)
        self.send_collect_reservations_request(utils.get_aware_utc_now())

        gevent.sleep(self.offer_delay)
        self.send_collect_offers_request(utils.get_aware_utc_now())
Example #16
    def backup_new_data(self, new_publish_list):
        """
        :param new_publish_list: A list of records to cache to disk.
        :type new_publish_list: list
        """
        _log.debug("Backing up unpublished values.")
        c = self._connection.cursor()

        if self._backup_storage_limit_gb is not None:

            def page_count():
                c.execute("PRAGMA page_count")
                return c.fetchone()[0]

            while page_count() >= self.max_pages:
                self._owner().vip.pubsub.publish('pubsub', 'backupdb/nomore')
                c.execute(
                    '''DELETE FROM outstanding
                    WHERE ROWID IN
                    (SELECT ROWID FROM outstanding
                    ORDER BY ROWID ASC LIMIT 100)''')


        for item in new_publish_list:
            source = item['source']
            topic = item['topic']
            meta = item.get('meta', {})
            values = item['readings']

            topic_id = self._backup_cache.get(topic)

            if topic_id is None:
                c.execute('''INSERT INTO topics values (?,?)''',
                          (None, topic))
                c.execute('''SELECT last_insert_rowid()''')
                row = c.fetchone()
                topic_id = row[0]
                self._backup_cache[topic_id] = topic
                self._backup_cache[topic] = topic_id

            meta_dict = self._meta_data[(source, topic_id)]
            for name, value in meta.items():
                current_meta_value = meta_dict.get(name)
                if current_meta_value != value:
                    c.execute('''INSERT OR REPLACE INTO metadata
                                 values(?, ?, ?, ?)''',
                              (source, topic_id, name, value))
                    meta_dict[name] = value

            for timestamp, value in values:
                if timestamp is None:
                    timestamp = get_aware_utc_now()
                _log.debug("Inserting into outstanding table with timestamp "
                           "{}".format(timestamp))
                c.execute(
                    '''INSERT OR REPLACE INTO outstanding
                    values(NULL, ?, ?, ?, ?)''',
                    (timestamp, source, topic_id, jsonapi.dumps(value)))

        self._connection.commit()
Example #17
    def set_last_read(self):
        try:
            last_read = self.vip.config.get('last_read')
        except (Exception, gevent.Timeout) as e:
            _log.debug(e)
            last_read = None

        if last_read is not None:
            if type(last_read) is not dict:
                _log.error(
                    "ERROR PROCESSING CONFIGURATION: last_read file does not contain dictionary"
                )
                last_read = None

        if last_read is None:
            last_read = {}

        backup_last_read = utils.format_timestamp(
            utils.get_aware_utc_now() +
            datetime.timedelta(hours=-1 * self.default_last_read))

        for r in self.registers:
            new_last_read = last_read.get(r.index, backup_last_read)
            last_read[r.index] = r.last_read = new_last_read

        self.last_read = last_read
Example #18
 def sample_event(cls):
     """Return a sample EiEvent for debugging purposes."""
     sample = cls('123456', '12345')
     sample.start_time = utils.get_aware_utc_now()
     sample.end_time = sample.start_time + timedelta(hours=1)
     sample.opt_type = 'optIn'
     return sample
Example #19
 def publish_data(self, topic, value, time_stamp):
     headers = {headers_mod.DATE: format_timestamp(get_aware_utc_now())}
     message = {"Value": value}
     message["TimeStamp"] = format_timestamp(time_stamp)
     topic = "/".join([self.logging_topic, topic])
     _log.debug("LOGGING {} - {} - {}".format(topic, value, time_stamp))
     self.parent.vip.pubsub.publish("pubsub", topic, headers, message).get()
Example #20
    def _on_platform_log_message(self, peer, sender, bus, topic, headers,
                                 message):
        """ Receive message from a registered platform

        This method is called with stats from the registered platform agents.

        """
        _log.debug('Got topic: {}'.format(topic))
        _log.debug('Got message: {}'.format(message))

        topicsplit = topic.split('/')
        platform_uuid = topicsplit[2]

        # For devices we use everything between devices/../all as a unique
        # key for determining the last time it was seen.
        key = '/'.join(topicsplit[:])
        _log.debug("key is: {}".format(key))

        point_list = []

        for point in message:
            point_list.append(point)

        stats = {
            'topic': key,
            'points': point_list,
            'last_published_utc': format_timestamp(get_aware_utc_now())
        }

        self._registry.update_performance(platform_uuid=platform_uuid,
                                          performance=stats)
Example #21
def test_watch_device_new_group(volttron_instance, agent, cleanup_db):
    """
    Test adding a new point topic to watch list. Add the topic to a new watch
    group. Agent should start watching for the new topic and should send correct
    alert messages and update database entries for the new topic
    :param volttron_instance: instance in which alert agent is running
    :param agent: fake agent used to make rpc calls to alert agent
    :param cleanup_db: function scope fixture to clean up alert and agent log
    tables in database.
    """
    global alert_messages, db_connection
    volttron_instance.stop_agent(alert_uuid)
    alert_messages.clear()
    volttron_instance.start_agent(alert_uuid)
    gevent.sleep(1)
    publish_time = get_aware_utc_now()
    agent.vip.pubsub.publish(peer='pubsub', topic='fakedevice')
    agent.vip.pubsub.publish(peer='pubsub',
                             topic='fakedevice2/all',
                             message=[{
                                 'point': 'value'
                             }])
    gevent.sleep(1)
    agent.vip.rpc.call(PLATFORM_TOPIC_WATCHER, 'watch_device', 'group2',
                       'newtopic/all', 5, ['point']).get()
    gevent.sleep(6)

    assert len(alert_messages) == 2
    # topics are ordered within a group based on the first element in the tuple
    assert "Topic(s) not published within time limit: ['fakedevice', " \
           "'fakedevice2/all', ('fakedevice2/all', 'point')]" in \
           alert_messages  or \
           "Topic(s) not published within time limit: ['fakedevice', " \
           "('fakedevice2/all', 'point'), 'fakedevice2/all']" in alert_messages

    assert "Topic(s) not published within time limit: [" \
           "('newtopic/all', 'point'), 'newtopic/all']" in \
           alert_messages  or \
           "Topic(s) not published within time limit: [" \
           "'newtopic/all', ('newtopic/all', 'point')]" in \
           alert_messages

    c = db_connection.cursor()
    c.execute('SELECT * FROM topic_log '
              'WHERE first_seen_after_timeout IS NULL '
              'AND last_seen_before_timeout IS NULL')
    results = c.fetchall()
    assert results is not None
    assert len(results) == 2
    assert {results[0][0], results[1][0]} == {'newtopic/all', 'newtopic/point'}
    assert results[0][2] == results[1][2] is None

    c.execute('SELECT * FROM topic_log '
              'WHERE first_seen_after_timeout is NULL '
              'AND last_seen_before_timeout > "{}"'.format(publish_time))
    results = c.fetchall()
    assert results is not None
    assert len(results) == 3
Example #22
    def _collect_key(self, web_address):
        """
        Try to get (server key, instance name, vip-address) of remote instance and send it to RoutingService
        to connect to the remote instance. If unsuccessful, try again later.
        :param web_address: web address of remote instance
        :return:
        """
        platform_info = dict()

        try:
            platform_info = self._get_platform_discovery(web_address)
            with self._ext_addresses_store_lock:
                _log.debug("Platform discovery info: {}".format(platform_info))
                name = platform_info['instance-name']
                self._ext_addresses_store[name] = platform_info
                self._ext_addresses_store.async_sync()
        except KeyError as exc:
            _log.error("Discovery info does not contain instance name {}".format(exc))
        except DiscoveryError:
            # If discovery error, try again later
            sec = random.random() * self.r + 30
            delay = utils.get_aware_utc_now() + timedelta(seconds=sec)
            self.core.schedule(delay, self._collect_key, web_address)
        except ConnectionError as e:
            _log.error("HTTP connection error {}".format(e))

        # If platform discovery is successful, send the info to RoutingService
        # to establish connection with remote platform.
        if platform_info:
            op = b'setupmode_platform_connection'
            connection_settings = dict(platform_info)
            connection_settings['web-address'] = web_address
            self._send_to_router(op, connection_settings)
Example #23
File: agent.py Project: miraabid/bemoss
    def _on_platform_log_message(self, peer, sender, bus, topic, headers,
                                 message):
        """ Receive message from a registered platform

        This method is called with stats from the registered platform agents.

        """
        _log.debug('Got topic: {}'.format(topic))
        _log.debug('Got message: {}'.format(message))

        topicsplit = topic.split('/')
        platform_uuid = topicsplit[2]

        # For devices we use everything between devices/../all as a unique
        # key for determining the last time it was seen.
        key = '/'.join(topicsplit[:])
        _log.debug("key is: {}".format(key))

        point_list = []

        for point in message:
            point_list.append(point)

        stats = {
            'topic': key,
            'points': point_list,
            'last_published_utc': format_timestamp(get_aware_utc_now())
        }

        self._registry.update_performance(platform_uuid=platform_uuid,
                                          performance=stats)
Example #24
    def _capture_data(self, peer, sender, bus, topic, headers, message,
                      device):
        timestamp_string = headers.get(headers_mod.DATE, None)
        timestamp = get_aware_utc_now()
        if timestamp_string is not None:
            timestamp, my_tz = process_timestamp(timestamp_string, topic)
        try:
            # 2.0 agents compatibility layer makes sender == pubsub.compat so
            # we can do the proper thing when it is here
            message = self._clean_compat(sender, topic, headers, message)
        except Exception as e:
            _log.exception(e)
            return
        try:
            if isinstance(message, dict):
                values = message
            else:
                values = message[0]
        except Exception as e:
            _log.exception(e)
            return

        if topic.startswith('analysis'):
            source = 'analysis'
        else:
            source = 'scrape'
        _log.debug(
            "Queuing {topic} from {source} for publish".format(topic=topic,
                                                               source=source))

        for key, value in values.items():
            self._add_to_cache(device, key, value)
Example #25
    def get_value_async_result(self,
                               username=None,
                               password=None,
                               start_time=None,
                               end_time=None):
        if end_time is None:
            end_time = utils.get_aware_utc_now()

        url = self.url + self.interface_point_name + '/~historyQuery'

        if isinstance(start_time, str):
            start_time = utils.parse_timestamp_string(start_time)

        # becchp.com does not accept percent-encoded parameters
        # requests is not configurable to not encode (from lead dev: https://stackoverflow.com/a/23497903)
        # do it manually:
        payload = {
            'start': self.time_format(start_time),
            'end': self.time_format(end_time)
        }
        payload_str = "&".join("%s=%s" % (k, v) for k, v in payload.items())

        return grequests.get(url,
                             auth=(username, password),
                             params=payload_str)
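
The manual query-string construction above sidesteps requests' automatic percent-encoding of dict params, as the comment notes; a pre-built string passes the timestamps through as-is. The string-building step on its own:

    payload = {'start': '2020-01-01T00:00:00', 'end': '2020-01-02T00:00:00'}
    payload_str = "&".join("%s=%s" % (k, v) for k, v in payload.items())
    print(payload_str)  # start=2020-01-01T00:00:00&end=2020-01-02T00:00:00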
Example #26
def test_ignore_topic(agent):
    """
    Test ignore_topic rpc call. When a topic is ignored, it should not appear
    in future alert messages
    :param agent: fake agent used to make rpc calls to alert agent
    """
    global alert_messages, db_connection

    agent.vip.rpc.call(PLATFORM_TOPIC_WATCHER, 'ignore_topic', 'group1',
                       'fakedevice2/all').get()
    alert_messages.clear()
    publish_time = get_aware_utc_now()
    agent.vip.pubsub.publish(peer='pubsub',
                             topic='fakedevice')
    agent.vip.pubsub.publish(peer='pubsub',
                             topic='fakedevice2/all',
                             message=[{'point': 'value'}])
    print("Alert messages {}".format(alert_messages))
    gevent.sleep(7)
    assert len(alert_messages) == 1
    assert u"Topic(s) not published within time limit: ['fakedevice']" in \
           alert_messages
    c = db_connection.cursor()
    c.execute('SELECT * FROM topic_log '
              'WHERE first_seen_after_timeout is NULL '
              'AND last_seen_before_timeout > "{}"'.format(publish_time))
    results = c.fetchall()
    assert results is not None
    assert len(results) == 1
    assert results[0][0] == u'fakedevice'
    assert results[0][2] is None
Example #27
 def publish_all(self, observation, topic_prefix="weather", headers=None):
     # Avoid the mutable-default-argument pitfall; copy before updating.
     headers = dict(headers or {})
     utcnow = utils.get_aware_utc_now()
     utcnow_string = utils.format_timestamp(utcnow)
     headers.update({HEADER_NAME_DATE: utcnow_string,
                     headers_mod.TIMESTAMP: utcnow_string})
     self.publish_subtopic(self.build_dictionary(observation),
                           topic_prefix, headers)
Example #28
    def on_polling(self):
        if self.zip is None and (self.region is None or self.city is None):
            return

        kwargs = {}
        if self.zip is not None:
            kwargs['zip'] = self.zip
            topic = 'weather2/polling/current/ZIP/{zip}/all'.format(zip=self.zip)
        else:
            kwargs['region'] = self.region
            kwargs['city'] = self.city
            topic = 'weather2/polling/current/{region}/{city}/all'.format(
                region=self.region,
                city=self.city
            )
        wu_resp = self.wu_service.current(**kwargs)
        publish_items = self.build_resp_current(wu_resp)

        if len(publish_items) > 0:
            headers = {
                HEADER_NAME_DATE: format_timestamp(utils.get_aware_utc_now()),
                HEADER_NAME_CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON
            }
            self.vip.pubsub.publish(peer='pubsub',
                                    topic=topic,
                                    message=publish_items,
                                    headers=headers)
            _log.debug(publish_items)
Example #29
    def _capture_log_data(self, peer, sender, bus, topic, headers, message):
        """Capture log data and submit it to be published by a historian."""
        try:
            data = self._clean_compat(sender, topic, headers, message)
        except Exception:
            return

        for point, item in data.items():
            if 'Readings' not in item or 'Units' not in item:
                _log.error("logging request for {topic} missing Readings "
                           "or Units".format(topic=topic))
                continue
            units = item['Units']
            dtype = item.get('data_type', 'float')
            tz = item.get('tz', None)
            if dtype == 'double':
                dtype = 'float'

            meta = {'units': units, 'type': dtype}

            readings = item['Readings']

            if not isinstance(readings, list):
                readings = [(get_aware_utc_now(), readings)]
            elif isinstance(readings[0], str):
                my_ts, my_tz = process_timestamp(readings[0], topic)
                readings = [(my_ts, readings[1])]
                if tz:
                    meta['tz'] = tz
                elif my_tz:
                    meta['tz'] = my_tz

            self._add_to_cache(topic, point, readings[0][1])
Example #30
    def _on_device_message(self, peer, sender, bus, topic, headers, message):
        # only deal with messages that have not been forwarded.
        if headers.get('X-Forwarded', None):
            return

        # only listen to the ending all message.
        if not re.match('.*/all$', topic):
            return

        topicsplit = topic.split('/')

        # For devices we use everything between devices/../all as a unique
        # key for determining the last time it was seen.
        key = '/'.join(topicsplit[1: -1])

        anon_topic = self._topic_replace_map[key]

        if not anon_topic:
            anon_topic = key

            for sr in self._topic_replace_list:
                _log.debug(
                    'anon replacing {}->{}'.format(sr['from'], sr['to']))
                anon_topic = anon_topic.replace(sr['from'],
                                                sr['to'])
            _log.debug('anon after replacing {}'.format(anon_topic))
            _log.debug('Anon topic is: {}'.format(anon_topic))
            self._topic_replace_map[key] = anon_topic
        _log.debug('DEVICES ON PLATFORM ARE: {}'.format(self._devices))
        self._devices[anon_topic] = {
            'points': message[0].keys(),
            'last_published_utc': format_timestamp(get_aware_utc_now())
        }
Example #31
    def get_devices(self):
        cp = deepcopy(self._devices)
        foundbad = False

        for k, v in cp.items():
            dt = parse_timestamp_string(v['last_published_utc'])
            dtnow = get_aware_utc_now()
            if dt+datetime.timedelta(minutes=5) < dtnow:
                v['health'] = Status.build(
                    BAD_STATUS,
                    'Too long between publishes for {}'.format(k)).as_dict()
                foundbad = True
            else:
                v['health'] = Status.build(GOOD_STATUS).as_dict()

        if len(cp):
            if foundbad:
                self.vip.health.set_status(
                    BAD_STATUS,
                    'At least one device has not published in 5 minutes')
            else:
                self.vip.health.set_status(
                    GOOD_STATUS,
                    'All devices publishing normally.'
                )
        return cp
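
The staleness rule in get_devices reduces to one comparison: last publish time plus five minutes against now. The same check in isolation, with datetime.fromisoformat standing in for parse_timestamp_string:

    from datetime import datetime, timedelta, timezone

    def is_stale(last_published_utc, max_age_minutes=5):
        last = datetime.fromisoformat(last_published_utc)
        return last + timedelta(minutes=max_age_minutes) < datetime.now(timezone.utc)

    print(is_stale("2020-01-01T00:00:00+00:00"))  # True: well past five minutes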
Example #32
    def _collect_key(self, web_address):
        """
        Try to get (server key, instance name, vip-address) of remote instance and send it to RoutingService
        to connect to the remote instance. If unsuccessful, try again later.
        :param web_address: web address of remote instance
        :return:
        """
        platform_info = dict()

        try:
            platform_info = self._get_platform_discovery(web_address)
            with self._ext_addresses_store_lock:
                _log.debug("Platform discovery info: {}".format(platform_info))
                name = platform_info['instance-name']
                self._ext_addresses_store[name] = platform_info
                self._ext_addresses_store.async_sync()
        except KeyError as exc:
            _log.error(
                "Discovery info does not contain instance name {}".format(exc))
        except DiscoveryError:
            # If discovery error, try again later
            sec = random.random() * self.r + 30
            delay = utils.get_aware_utc_now() + timedelta(seconds=sec)
            self.core.schedule(delay, self._collect_key, web_address)
        except ConnectionError as e:
            _log.error("HTTP connection error {}".format(e))

        # If platform discovery is successful, send the info to RoutingService
        # to establish connection with remote platform.
        if platform_info:
            op = b'setupmode_platform_connection'
            connection_settings = dict(platform_info)
            connection_settings['web-address'] = web_address
            self._send_to_router(op, connection_settings)
Example #33
 def _update_override_interval(self, interval, pattern):
     if interval <= 0.0:  # indicative of indefinite duration
         if pattern in self._override_interval_events:
             # If override duration is indefinite, do nothing
             if self._override_interval_events[pattern] is None:
                 return False
             else:
                 # Cancel the old event
                 evt = self._override_interval_events.pop(pattern)
                 evt[0].cancel()
         self._override_interval_events[pattern] = None
         return True
     else:
         override_start = utils.get_aware_utc_now()
         override_end = override_start + timedelta(seconds=interval)
         if pattern in self._override_interval_events:
             evt = self._override_interval_events[pattern]
             # If event is indefinite or greater than new end time, do nothing
             if evt is None or override_end < evt[1]:
                 return False
             else:
                 evt = self._override_interval_events.pop(pattern)
                 evt[0].cancel()
         # Schedule new override event
         event = self.core.schedule(override_end, self._cancel_override,
                                    pattern)
         self._override_interval_events[pattern] = (event, override_end)
         return True
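
The replacement rule in the else branch is easy to miss: a new finite override is ignored when the existing event is indefinite (stored as None) or already ends later. The comparison on its own, with assumed names:

    from datetime import datetime, timedelta, timezone

    def should_replace(existing_end, new_end):
        """existing_end is None for an indefinite override."""
        if existing_end is None:
            return False              # indefinite overrides are never shortened
        return new_end >= existing_end

    now = datetime.now(timezone.utc)
    print(should_replace(now + timedelta(seconds=60), now + timedelta(hours=1)))  # True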
Example #34
    def onstart(self, sender, **kwargs):

        _log.debug("Setting up log DB.")
        self._connection = sqlite3.connect('alert_log.sqlite',
                                           detect_types=sqlite3.PARSE_DECLTYPES
                                           | sqlite3.PARSE_COLNAMES)
        c = self._connection.cursor()

        c.execute("CREATE TABLE IF NOT EXISTS topic_log( "
                  "topic TEXT, "
                  "last_seen_before_timeout TIMESTAMP, "
                  "first_seen_after_timeout TIMESTAMP,"
                  "PRIMARY KEY(topic, last_seen_before_timeout))")

        c.execute("CREATE INDEX IF NOT EXISTS topic_index ON "
                  "topic_log (topic)")
        c.execute("CREATE INDEX IF NOT EXISTS down_time_index ON "
                  "topic_log (last_seen_before_timeout)")
        c.execute("CREATE INDEX IF NOT EXISTS up_time_index ON "
                  "topic_log (first_seen_after_timeout)")

        c.execute("CREATE TABLE IF NOT EXISTS agent_log ("
                  "start_time TIMESTAMP, "
                  "stop_time TIMESTAMP)")
        c.execute("CREATE INDEX IF NOT EXISTS stop_ts_index ON "
                  "agent_log (stop_time)")
        c.execute("INSERT INTO agent_log(start_time) values(?)",
                  (get_aware_utc_now(), ))
        c.close()
        self._connection.commit()

        for group_name, config in self.config.items():
            self.group_agent[group_name] = self.spawn_alert_group(
                group_name, config)
Example #35
    def _on_device_message(self, peer, sender, bus, topic, headers, message):
        # only deal with messages that have not been forwarded.
        if headers.get('X-Forwarded', None):
            return

        # only listen to the ending all message.
        if not re.match('.*/all$', topic):
            return

        topicsplit = topic.split('/')

        # For devices we use everything between devices/../all as a unique
        # key for determining the last time it was seen.
        key = '/'.join(topicsplit[1:-1])

        anon_topic = self._topic_replace_map[key]

        if not anon_topic:
            anon_topic = key

            for sr in self._topic_replace_list:
                _log.debug('anon replacing {}->{}'.format(
                    sr['from'], sr['to']))
                anon_topic = anon_topic.replace(sr['from'], sr['to'])
            _log.debug('anon after replacing {}'.format(anon_topic))
            _log.debug('Anon topic is: {}'.format(anon_topic))
            self._topic_replace_map[key] = anon_topic
        _log.debug('DEVICES ON PLATFORM ARE: {}'.format(self._devices))
        self._devices[anon_topic] = {
            'points': message[0].keys(),
            'last_published_utc': format_timestamp(get_aware_utc_now())
        }
Example #36
    def update_status(self, status, context=None):
        """
        Updates the internal state of the `Status` object.

        This method will throw errors if the context is not serializable or
        if the status parameter is not within the ACCEPTABLE_STATUS tuple.

        :param status:
        :param context:
        :return:
        """
        if status not in ACCEPTABLE_STATUS:
            raise ValueError('Invalid status value {}'.format(status))
        try:
            jsonapi.dumps(context)
        except TypeError:
            raise ValueError('Context must be JSON serializable.')

        status_changed = status != self._status
        self._status = status
        self._context = context
        self._last_updated = format_timestamp(get_aware_utc_now())

        if status_changed and self._status_changed_callback:
            self._status_changed_callback()
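
The serializability test here is a validate-before-mutate idiom: the context is rejected before any internal state changes. Reduced to its essentials; the ACCEPTABLE_STATUS values are assumptions for the sketch:

    import json

    ACCEPTABLE_STATUS = ('GOOD', 'BAD', 'UNKNOWN')  # assumed values

    def validate(status, context):
        if status not in ACCEPTABLE_STATUS:
            raise ValueError('Invalid status value {}'.format(status))
        try:
            json.dumps(context)
        except TypeError:
            raise ValueError('Context must be JSON serializable.')

    validate('GOOD', {'detail': 'ok'})      # passes
    # validate('GOOD', {'bad': object()})   # would raise ValueError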
Example #37
    def update_status(self, status, context=None):
        """
        Updates the internal state of the `Status` object.

        This method will throw errors if the context is not serializable or
        if the status parameter is not within the ACCEPTABLE_STATUS tuple.

        :param status:
        :param context:
        :return:
        """
        if status not in ACCEPTABLE_STATUS:
            raise ValueError('Invalid status value {}'.format(status))
        try:
            jsonapi.dumps(context)
        except TypeError:
            raise ValueError('Context must be JSON serializable.')

        status_changed = status != self._status
        self._status = status
        self._context = context
        self._last_updated = format_timestamp(get_aware_utc_now())

        if status_changed and self._status_changed_callback:
            self._status_changed_callback()
Example #38
    def update_override_patterns(self):
        if self._override_patterns is None:
            try:
                values = self.vip.config.get("override_patterns")
                values = jsonapi.loads(values)

                if isinstance(values, dict):
                    self._override_patterns = set()
                    for pattern, end_time in values.items():
                        # check the end_time
                        now = utils.get_aware_utc_now()
                        # If end time is indefinite, set override with indefinite duration
                        if end_time == "0.0":
                            self._set_override_on(pattern, 0.0, from_config_store=True)
                        else:
                            end_time = utils.parse_timestamp_string(end_time)
                            # If end time > current time, set override with new duration
                            if end_time > now:
                                delta = end_time - now
                                self._set_override_on(pattern, delta.total_seconds(), from_config_store=True)
                else:
                    self._override_patterns = set()
            except KeyError:
                self._override_patterns = set()
            except ValueError:
                _log.error("Override patterns is not set correctly in config store")
                self._override_patterns = set()
Example #39
    def update_scrape_schedule(self, time_slot, driver_scrape_interval, group,
                               group_offset_interval):
        self.time_slot_offset = (time_slot * driver_scrape_interval) + (
            group * group_offset_interval)
        self.time_slot = time_slot
        self.group = group

        _log.debug("{} group: {}, time_slot: {}, offset: {}".format(
            self.device_path, group, time_slot, self.time_slot_offset))

        if self.time_slot_offset >= self.interval:
            _log.warning(
                "Scrape offset exceeds interval. Required adjustment will cause scrapes to double up with other devices."
            )
            while self.time_slot_offset >= self.interval:
                self.time_slot_offset -= self.interval

        # Check whether or not we have run our starting method.
        if not self.periodic_read_event:
            return

        self.periodic_read_event.cancel()

        next_periodic_read = self.find_starting_datetime(
            utils.get_aware_utc_now())

        self.periodic_read_event = self.core.schedule(next_periodic_read,
                                                      self.periodic_read,
                                                      next_periodic_read)
Example #40
def test_calls_exceeded(volttron_instance, cleanup_cache, query_agent, weather):
    weather_uuid = weather[0]
    identity = weather[1]
    version = query_agent.vip.rpc.call(identity, 'get_version').get(timeout=3)
    cwd = volttron_instance.volttron_home
    database_file = "/".join([cwd, "agents", weather_uuid, "darkskyagent-" + version, "darkskyagent-" + version +
                              ".agent-data", "weather.sqlite"])
    sqlite_connection = sqlite3.connect(database_file)
    cursor = sqlite_connection.cursor()

    for i in range(0, 100):
        time = format_timestamp(get_aware_utc_now() + timedelta(seconds=i))
        insert_query = """INSERT INTO API_CALLS (CALL_TIME) VALUES (?);"""
        cursor.execute(insert_query, (time,))
    sqlite_connection.commit()

    locations = [{"lat": 39.7555, "long": -105.2211}]
    query_data = query_agent.vip.rpc.call(identity, 'get_current_weather', locations).get(timeout=30)

    assert query_data[0]['weather_error'] == 'No calls currently available for the configured API key'
    assert not query_data[0].get('weather_results')

    query_data = query_agent.vip.rpc.call(identity, 'get_hourly_forecast', locations).get(timeout=30)

    assert query_data[0]['weather_error'] == 'No calls currently available for the configured API key'
    assert not query_data[0].get('weather_results')

    delete_query = "DROP TABLE IF EXISTS API_CALLS;"
    cursor.execute(delete_query)

    create_query = """CREATE TABLE API_CALLS (CALL_TIME TIMESTAMP NOT NULL);"""
    cursor.execute(create_query)
    sqlite_connection.commit()
Example #41
 def publish_record(self, topic_suffix, message):
     headers = {
         headers_mod.DATE: utils.format_timestamp(utils.get_aware_utc_now())
     }
     message["TimeStamp"] = utils.format_timestamp(self.current_datetime)
     topic = "/".join([self.record_topic, topic_suffix])
     self.vip.pubsub.publish("pubsub", topic, headers, message).get()
Example #42
    def _capture_log_data(self, peer, sender, bus, topic, headers, message):
        """Capture log data and submit it to be published by a historian."""
        try:
            data = self._clean_compat(sender, topic, headers, message)
        except Exception:
            return

        for point, item in data.items():
            if 'Readings' not in item or 'Units' not in item:
                _log.error("logging request for {topic} missing Readings "
                           "or Units".format(topic=topic))
                continue
            units = item['Units']
            dtype = item.get('data_type', 'float')
            tz = item.get('tz', None)
            if dtype == 'double':
                dtype = 'float'

            meta = {'units': units, 'type': dtype}

            readings = item['Readings']

            if not isinstance(readings, list):
                readings = [(get_aware_utc_now(), readings)]
            elif isinstance(readings[0], str):
                my_ts, my_tz = process_timestamp(readings[0], topic)
                readings = [(my_ts, readings[1])]
                if tz:
                    meta['tz'] = tz
                elif my_tz:
                    meta['tz'] = my_tz

            self._add_to_cache(topic, point, readings[0][1])
Example #43
    def _capture_data(self, peer, sender, bus, topic, headers, message,
                      device):
        timestamp_string = headers.get(headers_mod.DATE, None)
        timestamp = get_aware_utc_now()
        if timestamp_string is not None:
            timestamp, my_tz = process_timestamp(timestamp_string, topic)
        try:
            # 2.0 agents compatibility layer makes sender == pubsub.compat so
            # we can do the proper thing when it is here
            message = self._clean_compat(sender, topic, headers, message)
        except Exception as e:
            _log.exception(e)
            return
        try:
            if isinstance(message, dict):
                values = message
            else:
                values = message[0]
        except Exception as e:
            _log.exception(e)
            return

        if topic.startswith('analysis'):
            source = 'analysis'
        else:
            source = 'scrape'
        _log.debug("Queuing {topic} from {source} for publish".format(
            topic=topic, source=source))

        for key, value in values.items():
            self._add_to_cache(device, key, value)
Example #44
    def update_override_patterns(self):
        if self._override_patterns is None:
            try:
                values = self.vip.config.get("override_patterns")
                values = jsonapi.loads(values)

                if isinstance(values, dict):
                    self._override_patterns = set()
                    for pattern, end_time in values.items():
                        # check the end_time
                        now = utils.get_aware_utc_now()
                        # If end time is indefinite, set override with indefinite duration
                        if end_time == "0.0":
                            self._set_override_on(pattern,
                                                  0.0,
                                                  from_config_store=True)
                        else:
                            end_time = utils.parse_timestamp_string(end_time)
                            # If end time > current time, set override with new duration
                            if end_time > now:
                                delta = end_time - now
                                self._set_override_on(pattern,
                                                      delta.total_seconds(),
                                                      from_config_store=True)
                else:
                    self._override_patterns = set()
            except KeyError:
                self._override_patterns = set()
            except ValueError:
                _log.error(
                    "Override patterns is not set correctly in config store")
                self._override_patterns = set()
Example #45
    def report_sample_telemetry(self):
        """
            At regular intervals, send sample metrics to the VEN agent as an RPC.

            Send measurements that simulate the following:
                - Constant baseline power
                - Measured power that is a sine wave with amplitude = baseline power
        """
        def sine_wave(t, p):
            """Return the current value at time t of a sine wave from -1 to 1 with period p."""
            seconds_since_hour = (60.0 * int(t.strftime('%M'))) + int(
                t.strftime('%S'))
            fraction_into_period = (seconds_since_hour % float(p)) / float(p)
            return numpy.sin(2 * numpy.pi * fraction_into_period)

        end_time = utils.get_aware_utc_now()
        start_time = end_time - timedelta(seconds=self.report_interval_secs)
        val = sine_wave(end_time, self.sine_period_secs)
        # Adjust the sine wave upward so that all values are positive, with amplitude = self.baseline_power_kw.
        measurement_kw = self.baseline_power_kw * ((val + 1) / 2)
        self.report_telemetry({
            'baseline_power_kw': str(self.baseline_power_kw),
            'current_power_kw': str(measurement_kw),
            'start_time': str(start_time),
            'end_time': str(end_time)
        })
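
The (val + 1) / 2 adjustment rescales the sine output from [-1, 1] to [0, 1], so the reported measurement swings between 0 and baseline_power_kw. A quick standalone check of that mapping (the baseline value is illustrative):

    baseline_power_kw = 6.2  # illustrative baseline
    for val in (-1.0, 0.0, 1.0):  # trough, midpoint, and crest of the sine wave
        measurement_kw = baseline_power_kw * ((val + 1) / 2)
        print(val, measurement_kw)  # -> 0.0, 3.1, 6.2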
Example #46
 def build_metadata_oadr_report(self, report):
     descriptions = []
     for tel_vals in jsonapi.loads(report.telemetry_parameters).values():
         # Rule 305: For TELEMETRY_USAGE reports, units in reportDescription.itemBase should be powerReal.
         if tel_vals['units'] == 'powerReal':
             item_base = oadr_20b.PowerRealType(itemDescription='RealPower',
                                                itemUnits='W',
                                                siScaleCode=None,
                                                powerAttributes=None)
         else:
             item_base = None
         min_freq, max_freq = tel_vals['min_frequency'], tel_vals[
             'max_frequency']
         desc = oadr_20b.oadrReportDescriptionType(
             rID=tel_vals['r_id'],
             reportType=tel_vals['report_type'],
             readingType=tel_vals['reading_type'],
             itemBase=item_base,
             oadrSamplingRate=self.build_sampling_rate(min_freq, max_freq))
         descriptions.append(desc)
     rpt_interval_duration = isodate.duration_isoformat(
         timedelta(seconds=report.interval_secs))
     return oadr_20b.oadrReportType(
         duration=oadr_20b.DurationPropType(rpt_interval_duration),
         oadrReportDescription=descriptions,
         reportRequestID=None,
         reportSpecifierID=report.report_specifier_id,
         reportName=report.name,
         createdDateTime=utils.get_aware_utc_now())
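
isodate.duration_isoformat renders the report interval as an ISO 8601 duration string, the form carried by the oadrReportType duration field. The conversion in isolation (the interval values are illustrative):

    import isodate
    from datetime import timedelta

    print(isodate.duration_isoformat(timedelta(seconds=300)))   # PT5M
    print(isodate.duration_isoformat(timedelta(seconds=3661)))  # PT1H1M1S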
Example #47
 def _update_override_interval(self, interval, pattern):
     if interval <= 0.0:     # indicative of indefinite duration
         if pattern in self._override_interval_events:
             # If override duration is indefinite, do nothing
             if self._override_interval_events[pattern] is None:
                 return False
             else:
                 # Cancel the old event
                 evt = self._override_interval_events.pop(pattern)
                 evt[0].cancel()
         self._override_interval_events[pattern] = None
         return True
     else:
         override_start = utils.get_aware_utc_now()
         override_end = override_start + timedelta(seconds=interval)
         if pattern in self._override_interval_events:
             evt = self._override_interval_events[pattern]
             # If event is indefinite or greater than new end time, do nothing
             if evt is None or override_end < evt[1]:
                 return False
             else:
                 evt = self._override_interval_events.pop(pattern)
                 evt[0].cancel()
         # Schedule new override event
         event = self.core.schedule(override_end, self._cancel_override, pattern)
         self._override_interval_events[pattern] = (event, override_end)
         return True
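
The boolean returned here tells the caller whether a new override actually took effect: an existing indefinite override, or a pending one that ends later than the request, wins and returns False. A hedged summary of the outcomes, written as an illustrative call sequence rather than the agent's own tests:

    # Assumes `agent` is an instance of the class above; the sequence is illustrative.
    # agent._update_override_interval(0.0, "devices/bldg/*")    # -> True: indefinite override set
    # agent._update_override_interval(60.0, "devices/bldg/*")   # -> False: indefinite override wins
    # agent._update_override_interval(120.0, "devices/other")   # -> True: 2-minute override scheduled
    # agent._update_override_interval(30.0, "devices/other")    # -> False: existing override ends later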
Example #48
 def format_multientry_response(self, location, response, service,
                                timezone):
     """
     Used to extract the data not used by the RPC method, and store it in
     the cache, helping to limit the number of API calls used to obtain data
     :param location: location dictionary to include with cached data
     :param response: Darksky forecast response
     :param service:
     :param timezone: timezone string extracted from Darksky response
     :return: formatted response data by service
     """
     data = []
     generation_time = self.get_generation_time_for_service(service)
     for entry in response['data']:
         entry_time = datetime.datetime.fromtimestamp(
             entry['time'], pytz.timezone(timezone))
         entry_time = entry_time.astimezone(pytz.utc)
         if entry_time > utils.get_aware_utc_now():
             if SERVICES_MAPPING[service]['type'] == 'forecast':
                 data.append([
                     jsonapi.dumps(location), generation_time, entry_time,
                     jsonapi.dumps(entry)
                 ])
             else:
                 data.append([
                     jsonapi.dumps(location), entry_time,
                     jsonapi.dumps(entry)
                 ])
     return data
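
Darksky entries carry Unix epoch times; the loop above localizes each one to the zone named in the response and then normalizes it to UTC before comparing it against the current time. The conversion in isolation (the epoch value and zone are illustrative):

    import datetime
    import pytz

    entry_time = datetime.datetime.fromtimestamp(1654041600, pytz.timezone('US/Pacific'))
    print(entry_time.astimezone(pytz.utc))  # 2022-06-01 00:00:00+00:00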
Example #49
    def reset_time(self, peer, sender, bus, topic, headers, message):
        """Callback for topic subscriptions

        Resets the timeout for topics and devices when publishes are received.
        """
        up_time = get_aware_utc_now()
        # TODO: What is the use case for this IF STMT
        # topic should always be there?? Ask Craig
        if topic not in self.wait_time:
            found = False
            # if topic isn't in wait time we need to figure out the
            # prefix topic so that we can determine the wait time
            for x in self.wait_time:
                # TODO: order the wait_time topics so furthest down the tree wins.
                if topic.startswith(x):
                    topic = x
                    found = True
                    break
            if not found:
                _log.debug("No configured topic prefix for topic {}".format(
                    topic))
                return

        log_topics = set()
        # Reset the standard topic timeout
        self.topic_ttl[topic] = self.wait_time[topic]
        self.last_seen[topic] = get_aware_utc_now()
        if topic in self.unseen_topics:
            self.unseen_topics.remove(topic)
            # log time we saw topic only if we had earlier recorded a timeout
            log_topics.add(topic)

        # Reset timeouts on volatile points
        if topic in self.point_ttl:
            received_points = set(message[0].keys())
            expected_points = self.point_ttl[topic].keys()
            for point in expected_points:
                if point in received_points:
                    self.point_ttl[topic][point] = self.wait_time[topic]
                    self.last_seen[(topic, point)] = get_aware_utc_now()
                    if (topic, point) in self.unseen_topics:
                        self.unseen_topics.remove((topic, point))
                        log_topics.add((topic, point))

        if log_topics:
            self.log_time_up(up_time, log_topics)
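
The TODO above calls for the most specific configured prefix to win. A hedged way to get "furthest down the tree wins" is to scan candidates longest-first; this is a sketch, not the agent's implementation:

    # Longest-prefix-first match over configured wait_time topics (illustrative data).
    wait_time = {'devices/campus': 60, 'devices/campus/building1': 30}
    topic = 'devices/campus/building1/ahu1/all'

    match = next((x for x in sorted(wait_time, key=len, reverse=True)
                  if topic.startswith(x)), None)
    print(match)  # devices/campus/building1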
Example #50
 def onstop(self, sender, **kwargs):
     c = self._connection.cursor()
     c.execute("UPDATE agent_log set stop_time = ? "
               " WHERE start_time = (SELECT max(start_time) from agent_log)",
               (get_aware_utc_now(),))
     c.close()
     self._connection.commit()
     self._connection.close()
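
The UPDATE above closes out the most recent run by correlating on max(start_time). A minimal sqlite3 sketch of the same pattern, assuming an agent_log table with only the two columns these queries touch:

    import sqlite3
    from datetime import datetime, timezone

    conn = sqlite3.connect(':memory:')
    conn.execute("CREATE TABLE agent_log (start_time TIMESTAMP, stop_time TIMESTAMP)")
    conn.execute("INSERT INTO agent_log (start_time) VALUES (?)",
                 (datetime.now(timezone.utc).isoformat(),))
    conn.execute("UPDATE agent_log SET stop_time = ? "
                 "WHERE start_time = (SELECT max(start_time) FROM agent_log)",
                 (datetime.now(timezone.utc).isoformat(),))
    conn.commit()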
Example #51
 def __init__(self, grace_time, now=None, save_state_callback=None, initial_state_string=None):
     self.tasks = {}
     self.running_tasks = set()
     self.preempted_tasks = set()
     self.set_grace_period(grace_time)
     self.save_state_callback = save_state_callback
     if now is None:
         now = utils.get_aware_utc_now()
     self.load_state(now, initial_state_string)
Example #52
    def _capture_log_data(self, peer, sender, bus, topic, headers, message):
        """Capture log data and submit it to be published by a historian."""

        # Anon the topic if necessary.
        topic = self._get_topic(topic)
        try:
            # 2.0 agents compatibility layer makes sender == pubsub.compat so
            # we can do the proper thing when it is here
            if sender == 'pubsub.compat':
                data = compat.unpack_legacy_message(headers, message)
            else:
                data = message
        except ValueError as e:
            _log.error("message for {topic} bad message string: "
                       "{message_string}".format(topic=topic,
                                                 message_string=message[0]))
            return
        except IndexError as e:
            _log.error("message for {topic} missing message string".format(
                topic=topic))
            return

        source = 'log'
        _log.debug(
            "Queuing {topic} from {source} for publish".format(topic=topic,
                                                               source=source))
        _log.debug(data)
        for point, item in data.items():
            #             ts_path = location + '/' + point
            if 'Readings' not in item or 'Units' not in item:
                _log.error("logging request for {topic} missing Readings "
                           "or Units".format(topic=topic))
                continue
            units = item['Units']
            dtype = item.get('data_type', 'float')
            tz = item.get('tz', None)
            if dtype == 'double':
                dtype = 'float'

            meta = {'units': units, 'type': dtype}

            readings = item['Readings']

            if not isinstance(readings, list):
                readings = [(get_aware_utc_now(), readings)]
            elif isinstance(readings[0], str):
                my_ts, my_tz = process_timestamp(readings[0], topic)
                readings = [(my_ts, readings[1])]
                if tz:
                    meta['tz'] = tz
                elif my_tz:
                    meta['tz'] = my_tz

            self._event_queue.put({'source': source,
                                   'topic': topic + '/' + point,
                                   'readings': readings,
                                   'meta': meta})
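
The log payload this handler expects maps point names to dicts carrying at least 'Readings' and 'Units', where Readings may be either a bare value or a [timestamp, value] pair. A hypothetical payload (the point names and values are illustrative):

    # Hypothetical message body for _capture_log_data:
    data = {
        'OutsideAirTemperature': {
            'Readings': ['2022-06-01T00:00:00', 72.5],  # [timestamp, value] pair
            'Units': 'F',
            'data_type': 'float',
            'tz': 'UTC'
        },
        'FanStatus': {
            'Readings': 1,  # bare value; the handler stamps it with the current time
            'Units': 'On/Off'
        }
    }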
Example #53
    def initialize_clock(self, simulated_start_time, simulated_stop_time=None, speed=None):
        """
            Start a simulation by furnishing start/stop times and a clock speed.

            If no simulated_stop_time is supplied, the simulation will run
            until another simulation is started or the agent is stopped.

            If no speed is supplied, the simulated clock speed will be the same as
            the wall clock (real-time) speed.

            The confirmation message that is returned indicates the wall clock (real)
            time when the simulation started.

        @param simulated_start_time: The simulated-clock time at which the simulation will start.
        @param simulated_stop_time: The simulated-clock time at which the simulation will stop (can be None).
        @param speed: A multiplier (float) that makes the simulation run faster or slower than real time.
        @return: A string, either an error message or a confirmation that the simulation has started.
        """
        try:
            parsed_start_time = utils.parse_timestamp_string(simulated_start_time)
        except ValueError:
            _log.debug('Failed to parse simulated_start_time {}'.format(simulated_start_time))
            return 'Invalid simulated_start_time'

        if simulated_stop_time:
            try:
                parsed_stop_time = utils.parse_timestamp_string(simulated_stop_time)
            except ValueError:
                _log.debug('Failed to parse simulated_stop_time {}'.format(simulated_stop_time))
                return 'Invalid simulated_stop_time'
        else:
            parsed_stop_time = None

        if speed is not None:
            try:
                parsed_speed = float(speed)
            except ValueError:
                _log.debug('Failed to parse speed {}'.format(speed))
                return 'Invalid speed'
            if parsed_speed <= 0.0:
                _log.debug('Asked to initialize with a zero or negative speed')
                return 'Asked to initialize with a zero or negative speed'
        else:
            parsed_speed = 1.0

        if parsed_stop_time and (parsed_stop_time < parsed_start_time):
            _log.debug('Asked to initialize with out-of-order start/stop times')
            return 'simulated_stop_time is earlier than simulated_start_time'

        self.actual_start_time = utils.get_aware_utc_now()
        self.simulated_start_time = parsed_start_time
        self.simulated_stop_time = parsed_stop_time
        self.speed = parsed_speed
        _log.debug('Initializing clock at {} to start at: {}'.format(self.actual_start_time, self.simulated_start_time))
        _log.debug('Initializing clock to stop at:  {}'.format(self.simulated_stop_time))
        _log.debug('Initializing clock to run at: {} times normal'.format(self.speed))
        return 'Simulation started at {}'.format(self.actual_start_time)
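
Given the fields stored here, the current simulated time follows from scaling elapsed wall-clock time by the speed multiplier. A hedged sketch of that computation; the helper name and its presence in the agent are assumptions:

    from datetime import datetime, timedelta, timezone

    def get_simulated_time(actual_start_time, simulated_start_time, speed):
        """Sketch: simulated now = simulated start + elapsed wall time * speed."""
        elapsed = datetime.now(timezone.utc) - actual_start_time
        return simulated_start_time + timedelta(seconds=elapsed.total_seconds() * speed)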
Example #54
    def _capture_data(self, peer, sender, bus, topic, headers, message,
                      device):

        # Anon the topic if necessary.
        topic = self._get_topic(topic)
        timestamp_string = headers.get(headers_mod.DATE, None)
        timestamp = get_aware_utc_now()
        if timestamp_string is not None:
            timestamp, my_tz = process_timestamp(timestamp_string, topic)
        _log.debug("### In capture_data timestamp str {} ".format(timestamp))
        try:
            _log.debug(
                "### In capture_data Actual message {} ".format(message))
            # 2.0 agents compatibility layer makes sender == pubsub.compat so
            # we can do the proper thing when it is here
            if sender == 'pubsub.compat':
                # message = jsonapi.loads(message[0])
                message = compat.unpack_legacy_message(headers, message)
                _log.debug("### message after compat {}".format(message))

            if isinstance(message, dict):
                values = message
            else:
                values = message[0]

        except ValueError as e:
            _log.error("message for {topic} bad message string: "
                       "{message_string}".format(topic=topic,
                                                 message_string=message[0]))
            return
        except IndexError as e:
            _log.error("message for {topic} missing message string".format(
                topic=topic))
            return
        except Exception as e:
            _log.exception(e)
            return

        meta = {}
        if not isinstance(message, dict):
            meta = message[1]

        if topic.startswith('analysis'):
            source = 'analysis'
        else:
            source = 'scrape'
        _log.debug(
            "Queuing {topic} from {source} for publish".format(topic=topic,
                                                               source=source))

        for key, value in values.items():
            point_topic = device + '/' + key
            self._event_queue.put({'source': source,
                                   'topic': point_topic,
                                   'readings': [(timestamp, value)],
                                   'meta': meta.get(key, {})})
Example #55
def publish_minute_data_for_two_hours(agent):
    now = get_aware_utc_now()
    # expectation[datetime] = {oat: b, mixed: c, damper: d}
    expectation = {}

    for h in range(2):
        data_by_time = {}

        for m in range(60):
            # Because timestamps in mongo are only concerned with the first
            #  three digits after the decimal we do this to give some
            # randomness here.
            myint = random.randint(0, 999)  # inclusive bound; 1000 would overflow the microsecond field
            mymicro = str(myint)+'000'

            now = datetime(now.year, now.month, now.day, h, m,
                           random.randint(0, 59), int(mymicro))
            # Make some random readings
            oat_reading = random.uniform(30, 100)
            mixed_reading = oat_reading + random.uniform(-5, 5)
            damper_reading = random.uniform(0, 100)

            # Create a message for all points.
            all_message = [{
                'OutsideAirTemperature': oat_reading,
                'MixedAirTemperature': mixed_reading,
                'DamperSignal': damper_reading},
                {'OutsideAirTemperature':
                     {'units': 'F', 'tz': 'UTC', 'type': 'float'},
                 'MixedAirTemperature': {'units': 'F', 'tz': 'UTC', 'type': 'float'},
                 'DamperSignal': {'units': '%', 'tz': 'UTC', 'type': 'float'}
                 }]

            now_iso_string = format_timestamp(now)
            data_by_time[now_iso_string] = {
                "oat_point": oat_reading,
                "mixed_point": mixed_reading,
                "damper_point": damper_reading
            }

            # now = '2015-12-02T00:00:00'
            headers = {
                headers_mod.DATE: now_iso_string
            }

            # Publish messages
            agent.vip.pubsub.publish(
                    'pubsub', ALL_TOPIC, headers, all_message).get(timeout=10)

            expectation[now] = {
                query_points['oat_point']: oat_reading,
                query_points['mixed_point']: mixed_reading,
                query_points['damper_point']: damper_reading
            }
    gevent.sleep(0.1)
    return expectation
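
On the microsecond comment above: Mongo keeps only millisecond precision, so the random microsecond component is built as a 0-999 millisecond count with three trailing zeros, guaranteeing nothing survives below the millisecond. A quick check of that construction:

    import random

    myint = random.randint(0, 999)     # random millisecond count
    mymicro = int(str(myint) + '000')  # expressed as microseconds
    assert 0 <= mymicro < 1000000 and mymicro % 1000 == 0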
Example #56
    def publish_response(self, resp_topic, publish_items):
        headers = {
            HEADER_NAME_DATE: format_timestamp(utils.get_aware_utc_now()),
            HEADER_NAME_CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON
        }

        self.vip.pubsub.publish(peer='pubsub',
                                topic=resp_topic,
                                message=publish_items,
                                headers=headers)
Example #57
    def test_event_activation(self, test_agent):
        """
            Test event activation at its start_time.

            Time the test so that the event's start_time arrives. Confirm the event's state change.

        @param test_agent: This test agent.
        """
        self.vtn_request_variable_event('6', utils.get_aware_utc_now(), 60 * 60 * 24)
        assert self.get_event_dict(test_agent, '6').get('status') == 'active'
Example #58
    def test_event_completion(self, test_agent):
        """
            Test event completion at its end_time.

            Time the test so that the event's end_time arrives. Confirm the event's state change.

        @param test_agent: This test agent.
        """
        self.vtn_request_variable_event('7', utils.get_aware_utc_now(), 1)
        assert self.get_event_dict(test_agent, '7').get('status') == 'completed'