def test_elapsed_time(self, agent):
        """Confirm that correct simulated times are returned."""
        sim_start_time = utils.parse_timestamp_string('2017-01-01 08:00')
        sim_stop_time = utils.parse_timestamp_string('2017-01-01 10:00')
        clock_speed = 10.0
        response = self.start_simulation(agent, str(sim_start_time),
                                         str(sim_stop_time), str(clock_speed))
        actual_start_time = datetime.now()
        assert 'started' in response

        time.sleep(2)
        response = self.get_time(agent)
        assert type(response) != str
        elapsed_simulated_seconds = (datetime.now() -
                                     actual_start_time).seconds * clock_speed
        simulation_timestamp = sim_start_time + timedelta(
            seconds=elapsed_simulated_seconds)
        assert str(response) == str(simulation_timestamp)

        time.sleep(2)
        response = self.get_time(agent)
        assert type(response) != str
        elapsed_simulated_seconds = (datetime.now() -
                                     actual_start_time).seconds * clock_speed
        simulation_timestamp = sim_start_time + timedelta(
            seconds=elapsed_simulated_seconds)
        assert str(response) == str(simulation_timestamp)
Example #2
    def calculate_soc(self, sim_time, power_kw):
        """
            Calculate and return the current state of charge (SOC).

            The new SOC is based on SOC, current power, elapsed time and max SOC.

        :param sim_time: (str) Current time on the simulation clock.
        :param power_kw: (float) Current charge/discharge power.
        :return: (float) The new SOC value in kWh.
        """
        elapsed_time_hrs = 0.0
        if sim_time and self.old_timestamp:
            try:
                new_time = utils.parse_timestamp_string(sim_time)
                old_time = utils.parse_timestamp_string(self.old_timestamp)
                if new_time > old_time:
                    elapsed_time_hrs = (new_time -
                                        old_time).total_seconds() / 3600.0
            except ValueError:
                pass
        new_soc = self.get_register_value('soc_kwh') + (power_kw *
                                                        elapsed_time_hrs)
        new_soc = min(max(new_soc, 0.0),
                      self.get_register_value('max_soc_kwh'))
        new_soc = int(1000 * new_soc) / 1000.0  # Truncate to thousandths of a kWh
        return new_soc
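
A worked illustration of the update rule above, using hypothetical values (10 kWh on hand, a 12 kWh ceiling, and a half hour of 5 kW charging):

# Hypothetical values illustrating the SOC update in calculate_soc.
soc_kwh, max_soc_kwh = 10.0, 12.0          # current and maximum state of charge
power_kw, elapsed_time_hrs = 5.0, 0.5      # 5 kW of charging for 30 simulated minutes
new_soc = soc_kwh + power_kw * elapsed_time_hrs    # 12.5 kWh before clamping
new_soc = min(max(new_soc, 0.0), max_soc_kwh)      # clamped to the 12.0 kWh ceiling
new_soc = int(1000 * new_soc) / 1000.0             # truncated to thousandths
assert new_soc == 12.0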
Example #3
def test_get_thermostat_data_success(mock_ecobee):
    mock_ecobee.configure(VALID_ECOBEE_CONFIG, VALID_ECOBEE_REGISTRY)
    assert mock_ecobee.thermostat_data == REMOTE_RESPONSE
    data_cache = mock_ecobee.cache.get('https://api.ecobee.com/1/thermostat')
    curr_timestamp = utils.parse_timestamp_string(
        data_cache.get("request_timestamp"))

    # Check that we get cached data when possible
    mock_ecobee.get_thermostat_data()
    assert mock_ecobee.thermostat_data == REMOTE_RESPONSE
    data_cache = mock_ecobee.cache.get('https://api.ecobee.com/1/thermostat')
    refresh_timestamp = utils.parse_timestamp_string(
        data_cache.get("request_timestamp"))
    assert refresh_timestamp == curr_timestamp

    # cause a request_tokens request to occur during get_ecobee_data
    mock_ecobee.authorization_code = True
    mock_ecobee.refresh_token = True
    mock_ecobee.access_token = True
    mock_ecobee.ecobee_data = None
    cleanup_mock_cache(mock_ecobee)
    mock_ecobee.get_thermostat_data()
    assert mock_ecobee.thermostat_data == REMOTE_RESPONSE
    data_cache = mock_ecobee.cache.get('https://api.ecobee.com/1/thermostat')
    refresh_timestamp = utils.parse_timestamp_string(
        data_cache.get("request_timestamp"))
    assert refresh_timestamp > curr_timestamp

    # should handle having to get a new refresh token and still fetch data
    mock_ecobee.access_token = False
    mock_ecobee.authorization_stage = "REFRESH_TOKENS"
    mock_ecobee.ecobee_data = None
    cleanup_mock_cache(mock_ecobee)
    mock_ecobee.get_thermostat_data()
    assert mock_ecobee.thermostat_data == REMOTE_RESPONSE
    assert mock_ecobee.access_token is True
    data_cache = mock_ecobee.cache.get('https://api.ecobee.com/1/thermostat')
    refresh_timestamp = utils.parse_timestamp_string(
        data_cache.get("request_timestamp"))
    assert refresh_timestamp > curr_timestamp

    # should handle having to request new tokens from scratch (REQUEST_TOKENS stage) and still fetch data
    mock_ecobee.refresh_token = False
    mock_ecobee.access_token = False
    mock_ecobee.authorization_stage = "REQUEST_TOKENS"
    mock_ecobee.ecobee_data = None
    cleanup_mock_cache(mock_ecobee)
    mock_ecobee.get_thermostat_data()
    assert mock_ecobee.thermostat_data == REMOTE_RESPONSE
    assert mock_ecobee.access_token is True
    assert mock_ecobee.refresh_token is True

    # now should pull from cache again
    data_cache = mock_ecobee.cache.get('https://api.ecobee.com/1/thermostat')
    timestamp = data_cache.get("request_timestamp")
    mock_ecobee.get_thermostat_data()
    data_cache = mock_ecobee.cache.get('https://api.ecobee.com/1/thermostat')
    next_timestamp = data_cache.get("request_timestamp")
    assert timestamp == next_timestamp
Example #4
    def validate_config(self):
        """
            Validate the data types and, in some cases, the value ranges or values of the config parameters.

            This is mostly just validation, but it also has a side-effect of expanding shell
            variables or user directory references (~) in the configured pathnames.
        """
        assert type(self.agent_id) is str
        assert type(self.heartbeat_period) is int
        assert type(self.positive_dispatch_kw) is float
        assert self.positive_dispatch_kw >= 0.0
        assert type(self.negative_dispatch_kw) is float
        assert self.negative_dispatch_kw <= 0.0
        assert type(self.go_positive_if_below) is float
        assert 0.0 <= self.go_positive_if_below <= 1.0
        assert type(self.go_negative_if_above) is float
        assert 0.0 <= self.go_negative_if_above <= 1.0
        if self.sim_start:
            assert type(parse_timestamp_string(self.sim_start)) is datetime
        if self.sim_end:
            assert type(parse_timestamp_string(self.sim_end)) is datetime
        assert type(self.sim_speed) is float
        assert type(self.report_interval) is int
        assert type(self.report_file_path) is str
        self.report_file_path = os.path.expandvars(os.path.expanduser(self.report_file_path))
        assert type(self.sim_driver_list) is list

        if self.simload:
            assert type(self.load_timestamp_column_header) is str
            assert type(self.load_power_column_header) is str
            assert type(self.load_data_frequency_min) is int
            assert type(self.load_data_year) is str
            assert type(self.load_csv_file_path) is str
            self.load_csv_file_path = os.path.expandvars(os.path.expanduser(self.load_csv_file_path))
            _log.debug('Testing for existence of {}'.format(self.load_csv_file_path))
            assert os.path.exists(self.load_csv_file_path)

        if self.simpv:
            assert type(self.pv_panel_area) is float
            assert type(self.pv_efficiency) is float
            assert 0.0 <= self.pv_efficiency <= 1.0
            assert type(self.pv_data_frequency_min) is int
            assert type(self.pv_data_year) is str
            assert type(self.pv_csv_file_path) is str
            _log.debug('Testing for existence of {}'.format(self.pv_csv_file_path))
            self.pv_csv_file_path = os.path.expandvars(os.path.expanduser(self.pv_csv_file_path))
            assert os.path.exists(self.pv_csv_file_path)

        if self.simstorage:
            assert type(self.storage_soc_kwh) is float
            assert type(self.storage_max_soc_kwh) is float
            assert type(self.storage_max_charge_kw) is float
            assert type(self.storage_max_discharge_kw) is float
            assert type(self.storage_reduced_charge_soc_threshold) is float
            assert 0.0 <= self.storage_reduced_charge_soc_threshold <= 1.0
            assert type(self.storage_reduced_discharge_soc_threshold) is float
            assert 0.0 <= self.storage_reduced_discharge_soc_threshold <= 1.0
    def validate_config(self):
        """
            Validate the data types and, in some cases, the value ranges or values of the config parameters.

            This is mostly just validation, but it also has a side-effect of expanding shell
            variables or user directory references (~) in the configured pathnames.
        """
        assert type(self.agent_id) is str
        assert type(self.heartbeat_period) is int
        assert type(self.positive_dispatch_kw) is float
        assert self.positive_dispatch_kw >= 0.0
        assert type(self.negative_dispatch_kw) is float
        assert self.negative_dispatch_kw <= 0.0
        assert type(self.go_positive_if_below) is float
        assert 0.0 <= self.go_positive_if_below <= 1.0
        assert type(self.go_negative_if_above) is float
        assert 0.0 <= self.go_negative_if_above <= 1.0
        assert type(parse_timestamp_string(self.sim_start)) is datetime
        if self.sim_end:
            assert type(parse_timestamp_string(self.sim_end)) is datetime
        assert type(self.sim_speed) is float
        assert type(self.report_interval) is int
        assert type(self.report_file_path) is str
        self.report_file_path = os.path.expandvars(os.path.expanduser(self.report_file_path))
        assert type(self.sim_driver_list) is list

        if 'simload' in self.sim_driver_list:
            assert type(self.load_timestamp_column_header) is str
            assert type(self.load_power_column_header) is str
            assert type(self.load_data_frequency_min) is int
            assert type(self.load_data_year) is str
            assert type(self.load_csv_file_path) is str
            self.load_csv_file_path = os.path.expandvars(os.path.expanduser(self.load_csv_file_path))
            _log.debug('Testing for existence of {}'.format(self.load_csv_file_path))
            assert os.path.exists(self.load_csv_file_path)

        if 'simpv' in self.sim_driver_list:
            assert type(self.pv_panel_area) is float
            assert type(self.pv_efficiency) is float
            assert 0.0 <= self.pv_efficiency <= 1.0
            assert type(self.pv_data_frequency_min) is int
            assert type(self.pv_data_year) is str
            assert type(self.pv_csv_file_path) is str
            _log.debug('Testing for existence of {}'.format(self.pv_csv_file_path))
            self.pv_csv_file_path = os.path.expandvars(os.path.expanduser(self.pv_csv_file_path))
            assert os.path.exists(self.pv_csv_file_path)

        if 'simstorage' in self.sim_driver_list:
            assert type(self.storage_soc_kwh) is float
            assert type(self.storage_max_soc_kwh) is float
            assert type(self.storage_max_charge_kw) is float
            assert type(self.storage_max_discharge_kw) is float
            assert type(self.storage_reduced_charge_soc_threshold) is float
            assert 0.0 <= self.storage_reduced_charge_soc_threshold <= 1.0
            assert type(self.storage_reduced_discharge_soc_threshold) is float
            assert 0.0 <= self.storage_reduced_discharge_soc_threshold <= 1.0
            assert type(self.storage_setpoint_rule) is str
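
A configuration that satisfies the checks above might look like this (hypothetical values; the key names are assumed to mirror the attribute names being validated):

config = {
    "agent_id": "simulationagent",
    "heartbeat_period": 5,
    "positive_dispatch_kw": 15.0,
    "negative_dispatch_kw": -15.0,
    "go_positive_if_below": 0.1,
    "go_negative_if_above": 0.9,
    "sim_start": "2017-02-02 13:00:00",
    "sim_end": "2017-02-02 16:00:00",
    "sim_speed": 10.0,
    "report_interval": 14,
    "report_file_path": "~/run/simulation_out.csv",
    "sim_driver_list": ["simload", "simpv", "simstorage"],
}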
    def initialize_clock(self, simulated_start_time, simulated_stop_time=None, speed=None):
        """
            Start a simulation by furnishing start/stop times and a clock speed.

            If no simulated_stop_time is supplied, the simulation will run
            until another simulation is started or the agent is stopped.

            If no speed is supplied, the simulated clock speed will be the same as
            the wall clock (real-time) speed.

            The confirmation message that is returned indicates the wall clock (real)
            time when the simulation started.

        @param simulated_start_time: The simulated-clock time at which the simulation will start.
        @param simulated_stop_time: The simulated-clock time at which the simulation will stop (can be None).
        @param speed: A multiplier (float) that makes the simulation run faster or slower than real time.
        @return: A string, either an error message or a confirmation that the simulation has started.
        """
        try:
            parsed_start_time = utils.parse_timestamp_string(simulated_start_time)
        except ValueError:
            _log.debug('Failed to parse simulated_start_time {}'.format(simulated_start_time))
            return 'Invalid simulated_start_time'

        if simulated_stop_time:
            try:
                parsed_stop_time = utils.parse_timestamp_string(simulated_stop_time)
            except ValueError:
                _log.debug('Failed to parse simulated_stop_time {}'.format(simulated_stop_time))
                return 'Invalid simulated_stop_time'
        else:
            parsed_stop_time = None

        if speed is not None:
            try:
                parsed_speed = float(speed)
            except ValueError:
                _log.debug('Failed to parse speed {}'.format(speed))
                return 'Invalid speed'
            if parsed_speed <= 0.0:
                _log.debug('Asked to initialize with a zero or negative speed')
                return 'Asked to initialize with a zero or negative speed'
        else:
            parsed_speed = 1.0

        if parsed_stop_time and (parsed_stop_time < parsed_start_time):
            _log.debug('Asked to initialize with out-of-order start/stop times')
            return 'simulated_stop_time is earlier than simulated_start_time'

        self.actual_start_time = utils.get_aware_utc_now()
        self.simulated_start_time = parsed_start_time
        self.simulated_stop_time = parsed_stop_time
        self.speed = parsed_speed
        _log.debug('Initializing clock at {} to start at: {}'.format(self.actual_start_time, self.simulated_start_time))
        _log.debug('Initializing clock to stop at:  {}'.format(self.simulated_stop_time))
        _log.debug('Initializing clock to run at: {} times normal'.format(self.speed))
        return 'Simulation started at {}'.format(self.actual_start_time)
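
The agent's time lookup itself is not shown here; a minimal sketch of the mapping it implies, consistent with test_elapsed_time above (the simulated clock advances at speed times the wall clock), could be:

from datetime import timedelta

def simulated_now(actual_start_time, simulated_start_time, speed, wall_clock_now):
    """Sketch only: map elapsed wall-clock time onto the simulated clock."""
    elapsed_simulated_seconds = (wall_clock_now - actual_start_time).total_seconds() * speed
    return simulated_start_time + timedelta(seconds=elapsed_simulated_seconds)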
Example #8
    def get_devices(self):
        cp = deepcopy(self._devices)
        foundbad = False

        for k, v in cp.items():
            dt = parse_timestamp_string(v['last_published_utc'])
            dtnow = get_aware_utc_now()
            if dt+datetime.timedelta(minutes=5) < dtnow:
                v['health'] = Status.build(
                    BAD_STATUS,
                    'Too long between publishes for {}'.format(k)).as_dict()
                foundbad = True
            else:
                v['health'] = Status.build(GOOD_STATUS).as_dict()

        if len(cp):
            if foundbad:
                self.vip.health.set_status(
                    BAD_STATUS,
                    'At least one device has not published in 5 minutes')
            else:
                self.vip.health.set_status(
                    GOOD_STATUS,
                    'All devices publishing normally.'
                )
        return cp
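
The _devices map copied above is assumed to hold one entry per device topic, each recording its last publish time (illustrative shape only); a 'health' status is attached to the copy on the way out:

_devices = {
    'devices/campus/building/device1': {
        'last_published_utc': '2020-06-01T12:00:00+00:00',
    },
}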
Example #9
    def process_point(self, now, topic, min_value=None, max_value=None, output_topic=None,
                      aggregate_method=None):
        """
        This is where the magic happens.

        Introducing new or changing methods to clean/massage/introduce new data go here.

        Currently this function republishes the most recent data as is if it
        is not too old and falls within the min and max settings.

        If the most recent value is stale it uses an average of the last 30 days.

        New keyword arguments to this function can be added as needed and will be passed
        straight from the arguments to a topic in the configuration file.

        :param now:
        :param topic:
        :param min_value:
        :param max_value:
        :param output_topic:
        :param aggregate_method:
        """
        _log.debug("Processing topic: {}".format(topic))

        if output_topic is None:
            _log.error("No output topic for {}".format(topic))
            return

        # Query the data from the historian
        results = self.vip.rpc.call("platform.historian", "query", topic, "now -1d").get(timeout=5.0)

        values = results["values"]
        if not values:
            _log.error("No values for {}".format(topic))
            return

        last_timestamp, value = values[-1]
        last_timestamp = utils.parse_timestamp_string(last_timestamp)

        if now - last_timestamp > timedelta(seconds=self.period):
            _log.warning("Data used for {} is stale".format(topic))
            if aggregate_method == "avg":
                results = self.vip.rpc.call("platform.historian", "query", topic, "now -30d").get(timeout=5.0)
                values = results["values"]
                average = sum(x[1] for x in values)
                average /= len(values)
                value = average
            # Do something here to fake a better value.

        # Make sure the value is within bounds.
        if min_value is not None:
            value = max(min_value, value)

        if max_value is not None:
            value = min(max_value, value)

        #Publish the result.
        self.vip.pubsub.publish("pubsub", output_topic,
                            headers={headers.TIMESTAMP: utils.format_timestamp(now), "source": topic},
                            message=value)
Example #10
    def update_override_patterns(self):
        if self._override_patterns is None:
            try:
                values = self.vip.config.get("override_patterns")
                values = jsonapi.loads(values)

                if isinstance(values, dict):
                    self._override_patterns = set()
                    for pattern, end_time in values.items():
                        # check the end_time
                        now = utils.get_aware_utc_now()
                        # If end time is indefinite, set override with indefinite duration
                        if end_time == "0.0":
                            self._set_override_on(pattern, 0.0, from_config_store=True)
                        else:
                            end_time = utils.parse_timestamp_string(end_time)
                            # If end time > current time, set override with new duration
                            if end_time > now:
                                delta = end_time - now
                                self._set_override_on(pattern, delta.total_seconds(), from_config_store=True)
                else:
                    self._override_patterns = set()
            except KeyError:
                self._override_patterns = set()
            except ValueError:
                _log.error("Override patterns is not set correctly in config store")
                self._override_patterns = set()
Example #11
def main(database_name):
    db = sqlite3.connect(database_name)
    c = db.cursor()
    c.execute("select max(rowid) from data;")
    count = c.fetchone()[0]

    #Batches of 1000
    #We do this because of a bug in the sqlite implementation in python
    #which causes problems with nested cursors.
    for i in range(0, count, 1000):
        c.execute(
            "select rowid, ts from data where rowid > ? order by rowid asc limit 1000;",
            (i, ))
        rows = c.fetchall()
        print("processing rowid:", i + 1, "to", i + len(rows))

        for rowid, ts in rows:
            #Skip already converted rows.
            if "T" in ts:
                continue

            new_ts = format_timestamp(parse_timestamp_string(ts))
            c.execute("update data set ts = ? where rowid = ?;",
                      (new_ts, rowid))

        db.commit()
Example #12
    def update_override_patterns(self):
        if self._override_patterns is None:
            try:
                values = self.vip.config.get("override_patterns")
                values = jsonapi.loads(values)

                if isinstance(values, dict):
                    self._override_patterns = set()
                    for pattern, end_time in values.items():
                        # check the end_time
                        now = utils.get_aware_utc_now()
                        # If end time is indefinite, set override with indefinite duration
                        if end_time == "0.0":
                            self._set_override_on(pattern,
                                                  0.0,
                                                  from_config_store=True)
                        else:
                            end_time = utils.parse_timestamp_string(end_time)
                            # If end time > current time, set override with new duration
                            if end_time > now:
                                delta = end_time - now
                                self._set_override_on(pattern,
                                                      delta.total_seconds(),
                                                      from_config_store=True)
                else:
                    self._override_patterns = set()
            except KeyError:
                self._override_patterns = set()
            except ValueError:
                _log.error(
                    "Override patterns is not set correctly in config store")
                self._override_patterns = set()
    def sim_time(self):
        """
            Return the current simulated timestamp.

            The current simulated timestamp (as a string) was requested from the SimulationClockAgent
            via an RPC call (see BasicRevert.scrape_all()) and stored in a register.
            Get that value from the register, parse the string, and return the datetime.

            If a simulated timestamp cannot be returned, log the reason and return None.

        :return: (datetime) The current simulated timestamp.
        """
        sim_time = None
        timestamp_string = self.get_register_value('last_timestamp')
        try:
            sim_time = utils.parse_timestamp_string(timestamp_string)
        except TypeError:
            _log.warning('No timestamp returned by simulated time agent')
        except ValueError:
            if timestamp_string == 'Past the simulation stop time':
                _log.info(timestamp_string)
            elif timestamp_string is not None:
                _log.warning(
                    'Invalid timestamp format returned by simulated time agent: {}'
                    .format(timestamp_string))
        return sim_time
Example #14
    def get_value_async_result(self,
                               username=None,
                               password=None,
                               start_time=None,
                               end_time=None):
        if end_time is None:
            end_time = utils.get_aware_utc_now()

        url = self.url + self.interface_point_name + '/~historyQuery'

        if isinstance(start_time, str):
            start_time = utils.parse_timestamp_string(start_time)

        # becchp.com does not accept percent-encoded parameters
        # requests is not configurable to not encode (from lead dev: https://stackoverflow.com/a/23497903)
        # do it manually:
        payload = {
            'start': self.time_format(start_time),
            'end': self.time_format(end_time)
        }
        payload_str = "&".join("%s=%s" % (k, v) for k, v in payload.items())

        return grequests.get(url,
                             auth=(username, password),
                             params=payload_str)
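
For reference, the manually joined query string built above looks like this (hypothetical formatted times); it is assembled by hand because the endpoint rejects percent-encoded parameters:

payload = {'start': '2017-01-01T08:00:00-05:00',
           'end': '2017-01-01T10:00:00-05:00'}
payload_str = "&".join("%s=%s" % (k, v) for k, v in payload.items())
# payload_str == 'start=2017-01-01T08:00:00-05:00&end=2017-01-01T10:00:00-05:00'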
    def adjusted_sim_time(self, data_year, minute_boundary):
        """
            Return an adjusted version of the current simulated timestamp.

            This version of the time is suitable for use during a CSV/dictionary lookup
            in which each row is normalized to a certain frequency in minutes (e.g. every 15 minutes),
            and the simulation's lookup must be adjusted to happen during the reference
            data's year.

            If an adjusted simulated timestamp cannot be returned, return None.

        :param data_year: (int) The year of the reference data.
        :param minute_boundary: (int) The reference data's frequency in minutes, e.g. 15 or 30.
        :return: (datetime) The adjusted timestamp.
        """
        normalized_time = None
        sim_time = self.sim_time()
        if sim_time:
            adjusted_minutes = (minute_boundary *
                                (sim_time.minute // minute_boundary + 1)) % 60
            timestamp_string = '{}/{}/{} {}:{}:00'.format(
                sim_time.month, sim_time.day, data_year, sim_time.hour,
                adjusted_minutes)
            try:
                normalized_time = parse_timestamp_string(timestamp_string)
            except ValueError:
                _log.warning(
                    'Unable to parse the adjusted simulation timestamp: {}'.
                    format(timestamp_string))
        return normalized_time
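
A worked example of the minute-boundary adjustment above, with hypothetical values (a 15-minute boundary and a simulated time of 10:07): the lookup is pushed forward to the next boundary, 10:15, in the reference data's year.

minute_boundary = 15
sim_minute = 7
adjusted_minutes = (minute_boundary * (sim_minute // minute_boundary + 1)) % 60
assert adjusted_minutes == 15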
Example #16
 def match_make_offer(self, peer, sender, bus, topic, headers, message):
     timestamp = utils.parse_timestamp_string(message[0])
     unformed_markets = message[1]
     decoded_message = "Timestamp: {}".format(timestamp)
     self.log_event("match_make_offer", peer, sender, bus, topic, headers,
                    decoded_message)
     self.registrations.request_offers(timestamp, unformed_markets)
Example #17
    def process_point(self, now, topic, min_value=None, max_value=None, output_topic=None,
                      aggregate_method=None):
        """
        This is where the magic happens.

        Introducing new or changing methods to clean/massage/introduce new data go here.

        Currently this function republishes the most recent data as is if it
        is not too old and falls within the min and max settings.

        If the most recent value is stale it uses an average of the last 30 days.

        New keyword arguments to this function can be added as needed and will be passed
        straight from the arguments to a topic in the configuration file.

        :param now:
        :param topic:
        :param min_value:
        :param max_value:
        :param output_topic:
        :param aggregate_method:
        """
        _log.debug("Processing topic: {}".format(topic))

        if output_topic is None:
            _log.error("No output topic for {}".format(topic))
            return

        # Query the data from the historian
        results = self.vip.rpc.call("platform.historian", "query", topic, "now -1d").get(timeout=5.0)

        values = results["values"]
        if not values:
            _log.error("No values for {}".format(topic))
            return

        last_timestamp, value = values[-1]
        last_timestamp = utils.parse_timestamp_string(last_timestamp)

        if now - last_timestamp > timedelta(seconds=self.period):
            _log.warning("Data used for {} is stale".format(topic))
            if aggregate_method == "avg":
                results = self.vip.rpc.call("platform.historian", "query", topic, "now -30d").get(timeout=5.0)
                values = results["values"]
                average = sum(x[1] for x in values)
                average /= len(values)
                value = average
            # Do something here to fake a better value.

        # Make sure the value is within bounds.
        if min_value is not None:
            value = max(min_value, value)

        if max_value is not None:
            value = min(max_value, value)

        #Publish the result.
        self.vip.pubsub.publish("pubsub", output_topic,
                            headers={headers.TIMESTAMP: utils.format_timestamp(now), "source": topic},
                            message=value)
Example #18
 def match_report_clear_price(self, peer, sender, bus, topic, headers, message):
     timestamp = utils.parse_timestamp_string(message[0])
     market_name = message[1]
     quantity = message[2]
     price = message[3]
     decoded_message = "Timestamp: {} Market: {} Price: {} Quantity: {}".format(timestamp, market_name, price, quantity)
     self.log_event("match_report_clear_price", peer, sender, bus, topic, headers, decoded_message)
     self.registrations.report_clear_price(timestamp, market_name, price, quantity)
 def get_time(self, agt):
     """Issue an RPC call to get the current simulated clock time."""
     response = self.issue_rpc_call(agt, 'get_time')
     try:
         parsed_response = utils.parse_timestamp_string(response)
     except ValueError:
         parsed_response = response
     return parsed_response
 def on_topic(self, peer, sender, bus, topic, headers, message):
     date_header = headers.get('Date')
     d_time = utils.parse_timestamp_string(date_header) if date_header is not None else None
     for point in self.subscriptions[topic]:
         value = message[0].get(point.point_name)
         if value is not None:
             datum = point.scale_in(value)
             if datum is not None:
                 point.append(PointRecord(datum, d_time))
Example #22
 def match_report_aggregate(self, peer, sender, bus, topic, headers, message):
     timestamp = utils.parse_timestamp_string(message[0])
     market_name = message[1]
     buyer_seller = message[2]
     aggregate_curve_points = message[3]
     decoded_message = "Timestamp: {} Market: {} {} Curve: {}".format(timestamp, market_name, buyer_seller, aggregate_curve_points)
     self.log_event("match_report_aggregate", peer, sender, bus, topic, headers, decoded_message)
     aggregate_curve = PolyLineFactory.fromTupples(aggregate_curve_points)
     self.registrations.report_aggregate(timestamp, market_name, buyer_seller, aggregate_curve)
Example #23
 def match_report_error(self, peer, sender, bus, topic, headers, message):
     timestamp = utils.parse_timestamp_string(message[0])
     market_name = message[1]
     error_code = message[2]
     error_message = message[3]
     aux = message[4]
     decoded_message = "Timestamp: {} Market: {} Code: {} Message: {}".format(timestamp, market_name, error_code, error_message)
     self.log_event("match_report_error", peer, sender, bus, topic, headers, decoded_message)
     self.registrations.report_error(timestamp, market_name, error_code, error_message, aux)
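
Each of the market callbacks above unpacks a list-style message whose first element is a formatted timestamp string; for example, a message handled by match_report_error (hypothetical values):

message = ['2017-01-01 08:00:00', 'electric_market', 1, 'insufficient offers', None]
timestamp_string, market_name, error_code, error_message, aux = message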
Example #24
def test_can_set_status(volttron_instance1):
    """ Tests the ability to change a status by sending a different status
    code.

    This test also tests that the heartbeat is received.

    :param volttron_instance1:
    :return:
    """
    global subscription_results
    subscription_results.clear()
    new_agent = volttron_instance1.build_agent(identity='test_status')
    new_agent.vip.heartbeat.start()
    orig_status = Status.from_json(new_agent.vip.health.get_status())
    assert orig_status.status == STATUS_GOOD
    assert orig_status.context is None
    assert orig_status.last_updated is not None
    print('original status: {}'.format(orig_status.as_json()))
    new_context = {
        'foo': 'A test something when wrong',
        'woah': ['blah', 'blah']
    }
    agent_prefix = 'heartbeat/Agent'
    new_agent.vip.pubsub.subscribe(peer='pubsub',
                                   prefix=agent_prefix,
                                   callback=onmessage)
    gevent.sleep(1)
    new_agent.vip.health.set_status(STATUS_BAD, new_context)
    poll_gevent_sleep(
        2,
        lambda: messages_contains_prefix(agent_prefix, subscription_results))
    new_status = Status.from_json(new_agent.vip.health.get_status())
    print('new status: {}'.format(new_status.as_json()))
    assert new_status.status == STATUS_BAD
    assert new_status.context == new_context
    assert new_status.last_updated is not None

    print("OLD IS: {}".format(orig_status.last_updated))
    print("NEW IS: {}".format(new_status.last_updated))
    old_date = parse_timestamp_string(orig_status.last_updated)
    new_date = parse_timestamp_string(new_status.last_updated)
    assert old_date < new_date
    def test_elapsed_time(self, agent):
        """Confirm that correct simulated times are returned."""
        sim_start_time = utils.parse_timestamp_string('2017-01-01 08:00')
        sim_stop_time = utils.parse_timestamp_string('2017-01-01 10:00')
        clock_speed = 10.0
        response = self.start_simulation(agent, str(sim_start_time), str(sim_stop_time), str(clock_speed))
        actual_start_time = datetime.now()
        assert 'started' in response

        time.sleep(2)
        response = self.get_time(agent)
        assert type(response) != str
        elapsed_simulated_seconds = (datetime.now() - actual_start_time).seconds * clock_speed
        simulation_timestamp = sim_start_time + timedelta(seconds=elapsed_simulated_seconds)
        assert str(response) == str(simulation_timestamp)

        time.sleep(2)
        response = self.get_time(agent)
        assert type(response) != str
        elapsed_simulated_seconds = (datetime.now() - actual_start_time).seconds * clock_speed
        simulation_timestamp = sim_start_time + timedelta(seconds=elapsed_simulated_seconds)
        assert str(response) == str(simulation_timestamp)
Example #26
def test_get_point_malformed_data(mock_ecobee, point_name, expected_value):
    mock_ecobee.configure(VALID_ECOBEE_CONFIG, VALID_ECOBEE_REGISTRY)
    data_cache = mock_ecobee.cache.get('https://api.ecobee.com/1/thermostat')
    curr_timestamp = utils.parse_timestamp_string(
        data_cache.get("request_timestamp"))

    # Malformed data should cause ValueErrors, which then trigger the data to be refreshed
    mock_ecobee.thermostat_data = {}
    assert mock_ecobee.get_point(point_name) == expected_value
    data_cache = mock_ecobee.cache.get('https://api.ecobee.com/1/thermostat')
    refresh_timestamp = utils.parse_timestamp_string(
        data_cache.get("request_timestamp"))
    assert refresh_timestamp > curr_timestamp
    curr_timestamp = refresh_timestamp
    mock_ecobee.thermostat_data = {
        "thermostatsList": [{
            "identifier": 8675309,
        }]
    }
    assert mock_ecobee.get_point(point_name) == expected_value
    data_cache = mock_ecobee.cache.get('https://api.ecobee.com/1/thermostat')
    refresh_timestamp = utils.parse_timestamp_string(
        data_cache.get("request_timestamp"))
    assert refresh_timestamp > curr_timestamp
    curr_timestamp = refresh_timestamp
    mock_ecobee.thermostat_data = {
        "thermostatsList": [{
            "identifier": 8675309,
            "settings": {},
            "runtime": {},
            "events": [""]
        }]
    }
    assert mock_ecobee.get_point(point_name) == expected_value
    data_cache = mock_ecobee.cache.get('https://api.ecobee.com/1/thermostat')
    refresh_timestamp = utils.parse_timestamp_string(
        data_cache.get("request_timestamp"))
    assert refresh_timestamp > curr_timestamp
def test_can_set_status(volttron_instance1):
    """ Tests the ability to change a status by sending a different status
    code.

    This test also tests that the heartbeat is received.

    :param volttron_instance1:
    :return:
    """
    global subscription_results
    subscription_results.clear()
    new_agent = volttron_instance1.build_agent(identity='test_status')
    new_agent.vip.heartbeat.start()
    orig_status = Status.from_json(new_agent.vip.health.get_status())
    assert orig_status.status == STATUS_GOOD
    assert orig_status.context is None
    assert orig_status.last_updated is not None
    print('original status: {}'.format(orig_status.as_json()))
    new_context = {'foo': 'A test something when wrong',
                   'woah': ['blah', 'blah']}
    agent_prefix = 'heartbeat/Agent'
    new_agent.vip.pubsub.subscribe(peer='pubsub',
                                   prefix=agent_prefix, callback=onmessage)
    gevent.sleep(1)
    new_agent.vip.health.set_status(STATUS_BAD, new_context)
    poll_gevent_sleep(2, lambda: messages_contains_prefix(agent_prefix,
                                                          subscription_results))
    new_status = Status.from_json(new_agent.vip.health.get_status())
    print('new status: {}'.format(new_status.as_json()))
    assert new_status.status == STATUS_BAD
    assert new_status.context == new_context
    assert new_status.last_updated is not None

    print("OLD IS: {}".format(orig_status.last_updated))
    print("NEW IS: {}".format(new_status.last_updated))
    old_date = parse_timestamp_string(orig_status.last_updated)
    new_date = parse_timestamp_string(new_status.last_updated)
    assert old_date < new_date
Example #28
 def new_data(self, peer, sender, bus, topic, header, message):
     """
     Call back method for curtailable device data subscription.
     :param peer:
     :param sender:
     :param bus:
     :param topic:
     :param header:
     :param message:
     :return:
     """
     now = parse_timestamp_string(header[headers_mod.TIMESTAMP])
     data = message[0]
     self.ingest_data(topic, data, now)
Example #29
 def get_data_cache(self, url, update_frequency):
     """
     Fetches data from cache dict if it is up to date
     :param url: URL to use to use as lookup value in cache dict
     :param update_frequency: duration in seconds for which data in cache is considered up to date
     :return: Data stored in cache if up to date, otherwise None
     """
     url_data = self.cache.get(url)
     if url_data:
         timestamp = utils.parse_timestamp_string(url_data.get("request_timestamp"))
         if (datetime.datetime.now() - timestamp).total_seconds() < update_frequency:
             return url_data.get("request_response")
         else:
             _log.info("Cached Ecobee data out of date.")
     return None
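
The cache entries read above are assumed to have the following shape (illustrative values only):

cache_entry = {
    "request_timestamp": "2020-06-01 12:00:00",  # when the remote API was last queried
    "request_response": {},                      # the cached response body
}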
Example #30
    def update(self):
        async_requests = [
            r.get_value_async_result(username=self.username,
                                     password=self.password,
                                     start_time=r.last_read)
            for r in self.registers
        ]
        request_results = grequests.map(async_requests, size=10)
        print(request_results)

        temp_last_read = {}
        parsed_results = defaultdict(dict)
        for r, result in zip(self.registers, request_results):
            if result is None:
                _log.debug("request failed: {}".format(
                    async_requests[request_results.index(result)]))
                continue
            parsed_result = r.parse_result(result.text)
            if parsed_result is not None:
                parsed_results[r.device_topic][r.point_name] = parsed_result
                temp_last_read[r.index] = utils.format_timestamp(
                    max(
                        utils.parse_timestamp_string(time)
                        for time in parsed_result.keys()))
        collated_results = self.collate_results(parsed_results)

        records = []
        for timestamp, record in collated_results.items():
            for topic in record.keys():
                records.append({
                    'topic': topic,
                    'message': record[topic],
                    'headers': {
                        headers_mod.DATE: timestamp
                    }
                })

        self.publish_records(records)

        _log.debug("publish successful. Saving timestamps of latest data")
        last_read = self.last_read.copy()
        for r in self.registers:
            if r.index in temp_last_read:
                last_read[r.index] = temp_last_read[r.index]
                r.last_read = temp_last_read[r.index]
        self.last_read = last_read
        self.vip.config.set("last_read", last_read)
Example #31
    def _handle_publish(
            self, peer, sender, bus, topic, headers, message):
        """Process messages posted to message bus

        :param peer: unused
        :param sender: unused
        :param bus: unused
        :param topic: topic of message in the form of `base/point`
        :param headers: message headers including timestamp
        :param message: body of message
        """
        base_topic, _ = topic.rsplit('/', 1)
        points = message[0]

        inputs = {}
        for point, value in points.items():
            point_topic = base_topic + '/' + point
            if point_topic in self.input_topics:
                inputs[point_topic] = value

        timestamp = utils.parse_timestamp_string(
            headers[headers_mod.TIMESTAMP])

        # assume unaware timestamps are UTC
        if (timestamp.tzinfo is None
                or timestamp.tzinfo.utcoffset(timestamp) is None):
            timestamp = pytz.utc.localize(timestamp)

        if inputs:
            self.model.process_inputs(timestamp, inputs)

        if self.simulation_mode:
            if self.historian_training:
                self.train_components(timestamp)

            if topic in self.remaining_simulation_inputs:
                self.remaining_simulation_inputs.remove(topic)
            else:
                LOG.warning("Duplicate inputs: {}".format(topic))

            if not self.remaining_simulation_inputs:
                LOG.info("Run triggered by all input topics receiving publish")
                # if not enough time has passed, all input topics will need
                # to be received *again*
                self.remaining_simulation_inputs = self.all_topics.copy()
                self.run_optimizer(timestamp)
Example #32
    def get_value_async_result(self, username=None, password=None, start_time=None, end_time=None):
        if end_time is None:
            end_time = utils.get_aware_utc_now()

        url = self.url + self.interface_point_name + '/~historyQuery'

        if isinstance(start_time, str):
            start_time = utils.parse_timestamp_string(start_time)

        # becchp.com does not accept percent-encoded parameters
        # requests is not configurable to not encode (from lead dev: https://stackoverflow.com/a/23497903)
        # do it manually:
        payload = {'start': self.time_format(start_time),
                   'end': self.time_format(end_time)}
        payload_str = "&".join("%s=%s" % (k, v) for k, v in payload.items())

        return grequests.get(url, auth=(username, password), params=payload_str)
Example #33
    def evse_datetime_set(self, value):
        """Getting datetime object"""

        register = self.get_register_by_function('Datetime')

        evse_datetime = parse_timestamp_string(value)

        #pluck numbers out of datetime object
        year = str(evse_datetime.year)[-2:]
        month = str(evse_datetime.month)
        day = str(evse_datetime.day)
        hour = str(evse_datetime.hour)
        minute = str(evse_datetime.minute)
        second = str(evse_datetime.second)

        return self._set_request(register.write_command, year, month, day,
                                 hour, minute, second)
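
The field extraction above turns the parsed timestamp into the string arguments the write command expects; for a hypothetical timestamp:

from datetime import datetime

evse_datetime = datetime(2024, 7, 9, 8, 5, 3)
assert str(evse_datetime.year)[-2:] == '24'
assert (str(evse_datetime.month), str(evse_datetime.day)) == ('7', '9')
assert (str(evse_datetime.hour), str(evse_datetime.minute), str(evse_datetime.second)) == ('8', '5', '3')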
Example #34
    def get_live_data(self):
        """Query and parse NWS records"""
        r = requests.get(self.url)
        try:
            r.raise_for_status()
            parsed_json = r.json()
            records = parsed_json["properties"]["periods"]
        except (requests.exceptions.HTTPError, ValueError, KeyError) as e:
            LOG.error("Error retrieving weather data: " + str(e))
            raise e

        results = []
        for rec in records[:self.hours_forecast]:
            timestamp = utils.parse_timestamp_string(rec["endTime"])
            timestamp = timestamp.astimezone(pytz.UTC)
            result = {"timestamp": timestamp}
            result.update(self.get_nws_forecast_from_record(rec))
            results.append(result)
        return results
Example #35
    def on_message(self, peer, sender, bus, topic, headers, message):
        '''Callback for all matched messages; records publish-to-receipt latency for each message.'''
        #if self.count == 0:
        #    eprint(f"the max publishes is {self.max_publishes}")
        client_time = utils.get_aware_utc_now()
        utcnow_string = utils.format_timestamp(client_time)
        self.count += 1
        #eprint(
        #    "Process name: [%r], Count: [%r], Time: [%r], Peer: [%r], Sender: [%r]:, Bus: [%r], Topic: [%r], Headers: [%r], "
        #    "Message: [%r]" % (self.name, self.count, utcnow_string, peer, sender, bus, topic, headers, message))
        header_time = utils.parse_timestamp_string(headers['TimeStamp'])
        # eprint("Agent: {0}, current timestamp {1}, header timestamp {2}!".format(self.agent._agentid,
        #                                                                          utcnow_string,
        #                                                                          headers['TimeStamp']))
        self.data_list[self.publishes].append({
            'header_t': header_time.timestamp(),
            'client_t': client_time.timestamp(),
            })
        #if self.count%21 == 0 or self.count%42 == 1:
        diff = client_time - header_time
        d_float = diff.seconds + (diff.microseconds* 0.000001)
        #eprint(f"--- count [{self.count}] | Agent {self.agent._agentid} | pub time is {d_float} seconds")
        ##TODO: why do we take the last device? Should it be a mean?
        if self.count % self.devices == 0:
            #eprint("Agent: {0}, current timestamp {1}, header timestamp {2}!".format(self.agent._agentid,
            #                                                                         utcnow_string,
            #                                                                         headers['TimeStamp']))
            #eprint("I'M HERE!")
            diff = client_time - header_time
            d_float = diff.seconds + (diff.microseconds* 0.000001)
            self.msg.append(d_float)
            # increment publish count
            eprint(f'[{self.agent._agentid}] done with publish [{self.publishes}]')
            self.publishes += 1

        #self.delta_list.append(diff)
        #avg = sum(self.delta_list, timedelta(0))/len(self.delta_list)

        if (self.count == self.max_publishes):
            eprint(f"finishing because count [{self.count}] == max_publishes [{self.max_publishes}] (publish counter is [{self.publishes}])")
            self.queue_put(self.msg)
            self.task.kill()
Example #36
    def check_frequency(self, now):
        """Check to see if the passed in timestamp exceeds the configured
        max_data_frequency."""
        if self._max_data_frequency is None:
            return True

        now = utils.parse_timestamp_string(now)

        if self._next_allowed_publish is None:
            midnight = now.date()
            midnight = datetime.datetime.combine(midnight, datetime.time.min)
            self._next_allowed_publish = midnight
            while now > self._next_allowed_publish:
                self._next_allowed_publish += self._max_data_frequency

        if now < self._next_allowed_publish:
            return False

        while now >= self._next_allowed_publish:
            self._next_allowed_publish += self._max_data_frequency

        return True
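
The first branch above anchors the publish window at midnight and advances it in max_data_frequency steps; a worked illustration with hypothetical values (a 15-minute limit and a first call at 08:03, which is therefore rejected until the 08:15 boundary):

import datetime

max_data_frequency = datetime.timedelta(minutes=15)
now = datetime.datetime(2020, 6, 1, 8, 3)
next_allowed_publish = datetime.datetime.combine(now.date(), datetime.time.min)
while now > next_allowed_publish:
    next_allowed_publish += max_data_frequency
assert next_allowed_publish == datetime.datetime(2020, 6, 1, 8, 15)
# now < next_allowed_publish, so a publish at 08:03 returns False;
# the next timestamp at or after 08:15 returns True.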
Example #37
    def check_frequency(self, now):
        """Check to see if the passed in timestamp exceeds the configured
        max_data_frequency."""
        if self._max_data_frequency is None:
            return True

        now = utils.parse_timestamp_string(now)

        if self._next_allowed_publish is None:
            midnight = now.date()
            midnight = datetime.datetime.combine(midnight, datetime.time.min)
            self._next_allowed_publish = midnight
            while now > self._next_allowed_publish:
                self._next_allowed_publish += self._max_data_frequency

        if now < self._next_allowed_publish:
            return False

        while now >= self._next_allowed_publish:
            self._next_allowed_publish += self._max_data_frequency

        return True
def main(database_name):
    db = sqlite3.connect(database_name)
    c = db.cursor()
    c.execute("select max(rowid) from data;")
    count = c.fetchone()[0]

    #Batches of 1000
    #We do this because of a bug in the sqlite implementation in python
    #which causes problems with nested cursors.
    for i in range(0, count, 1000):
        c.execute("select rowid, ts from data where rowid > ? order by rowid asc limit 1000;", (i,))
        rows = c.fetchall()
        print "processing rowid:", i+1, "to", i+len(rows)

        for rowid, ts in rows:
            #Skip already converted rows.
            if "T" in ts:
                continue

            new_ts = format_timestamp(parse_timestamp_string(ts))
            c.execute("update data set ts = ? where rowid = ?;", (new_ts, rowid))

        db.commit()
Example #39
    def update(self):
        async_requests = [r.get_value_async_result(username=self.username,
                                                   password=self.password,
                                                   start_time=r.last_read) for r in self.registers]
        request_results = grequests.map(async_requests, size=10)
        print(request_results)

        temp_last_read = {}
        parsed_results = defaultdict(dict)
        for r, result in zip(self.registers, request_results):
            if result is None:
                _log.debug("request failed: {}".format(async_requests[request_results.index(result)]))
                continue
            parsed_result = r.parse_result(result.text)
            if parsed_result is not None:
                parsed_results[r.device_topic][r.point_name] = parsed_result
                temp_last_read[r.index] = utils.format_timestamp(
                    max(utils.parse_timestamp_string(time) for time in parsed_result.keys()))
        collated_results = self.collate_results(parsed_results)

        records = []
        for timestamp, record in collated_results.items():
            for topic in record.keys():
                records.append({'topic': topic,
                                'message': record[topic],
                                'headers': {headers_mod.DATE: timestamp}})

        self.publish_records(records)

        _log.debug("publish successful. Saving timestamps of latest data")
        last_read = self.last_read.copy()
        for r in self.registers:
            if r.index in temp_last_read:
                last_read[r.index] = temp_last_read[r.index]
                r.last_read = temp_last_read[r.index]
        self.last_read = last_read
        self.vip.config.set("last_read", last_read)
Example #40
    def get_devices(self):
        cp = deepcopy(self._devices)
        foundbad = False

        for k, v in cp.items():
            dt = parse_timestamp_string(v['last_published_utc'])
            dtnow = get_aware_utc_now()
            if dt + datetime.timedelta(minutes=5) < dtnow:
                v['health'] = Status.build(
                    BAD_STATUS,
                    'Too long between publishes for {}'.format(k)).as_dict()
                foundbad = True
            else:
                v['health'] = Status.build(GOOD_STATUS).as_dict()

        if len(cp):
            if foundbad:
                self.vip.health.set_status(
                    BAD_STATUS,
                    'At least one device has not published in 5 minutes')
            else:
                self.vip.health.set_status(GOOD_STATUS,
                                           'All devices publishing normally.')
        return cp
Example #41
    def configure_main(self, config_name, action, contents):
        config = self.default_config.copy()
        config.update(contents)

        if action == "NEW":
            try:
                self.max_open_sockets = config["max_open_sockets"]
                if self.max_open_sockets is not None:
                    max_open_sockets = int(self.max_open_sockets)
                    configure_socket_lock(max_open_sockets)
                    _log.info("maximum concurrently open sockets limited to " +
                              str(max_open_sockets))
                elif self.system_socket_limit is not None:
                    max_open_sockets = int(self.system_socket_limit * 0.8)
                    _log.info("maximum concurrently open sockets limited to " +
                              str(max_open_sockets) +
                              " (derived from system limits)")
                    configure_socket_lock(max_open_sockets)
                else:
                    configure_socket_lock()
                    _log.warn(
                        "No limit set on the maximum number of concurrently open sockets. "
                        "Consider setting max_open_sockets if you plan to work with 800+ modbus devices."
                    )

                self.max_concurrent_publishes = config[
                    'max_concurrent_publishes']
                max_concurrent_publishes = int(self.max_concurrent_publishes)
                if max_concurrent_publishes < 1:
                    _log.warn(
                        "No limit set on the maximum number of concurrent driver publishes. "
                        "Consider setting max_concurrent_publishes if you plan to work with many devices."
                    )
                else:
                    _log.info(
                        "maximum concurrent driver publishes limited to " +
                        str(max_concurrent_publishes))
                configure_publish_lock(max_concurrent_publishes)

                self.scalability_test = bool(config["scalability_test"])
                self.scalability_test_iterations = int(
                    config["scalability_test_iterations"])

                if self.scalability_test:
                    self.waiting_to_finish = set()
                    self.test_iterations = 0
                    self.test_results = []
                    self.current_test_start = None

            except ValueError as e:
                _log.error(
                    "ERROR PROCESSING STARTUP CRITICAL CONFIGURATION SETTINGS: {}"
                    .format(e))
                _log.error("MASTER DRIVER SHUTTING DOWN")
                sys.exit(1)

        else:
            if self.max_open_sockets != config["max_open_sockets"]:
                _log.info(
                    "The master driver must be restarted for changes to the max_open_sockets setting to take effect"
                )

            if self.max_concurrent_publishes != config[
                    "max_concurrent_publishes"]:
                _log.info(
                    "The master driver must be restarted for changes to the max_concurrent_publishes setting to take effect"
                )

            if self.scalability_test != bool(config["scalability_test"]):
                if not self.scalability_test:
                    _log.info(
                        "The master driver must be restarted with scalability_test set to true in order to run a test."
                    )
                if self.scalability_test:
                    _log.info(
                        "A scalability test may not be interrupted. Restarting the driver is required to stop the test."
                    )
            try:
                if self.scalability_test_iterations != int(
                        config["scalability_test_iterations"]
                ) and self.scalability_test:
                    _log.info(
                        "A scalability test must be restarted for the scalability_test_iterations setting to take effect."
                    )
            except ValueError:
                pass

        #update override patterns
        if self._override_patterns is None:
            try:
                values = self.vip.config.get("override_patterns")
                values = jsonapi.loads(values)

                if isinstance(values, dict):
                    self._override_patterns = set()
                    for pattern, end_time in values.items():
                        #check the end_time
                        now = utils.get_aware_utc_now()
                        #If end time is indefinite, set override with indefinite duration
                        if end_time == "0.0":
                            self._set_override_on(pattern,
                                                  0.0,
                                                  from_config_store=True)
                        else:
                            end_time = utils.parse_timestamp_string(end_time)
                            # If end time > current time, set override with new duration
                            if end_time > now:
                                delta = end_time - now
                                self._set_override_on(pattern,
                                                      delta.total_seconds(),
                                                      from_config_store=True)
                else:
                    self._override_patterns = set()
            except KeyError:
                self._override_patterns = set()
            except ValueError:
                _log.error(
                    "Override patterns is not set correctly in config store")
                self._override_patterns = set()
        try:
            driver_scrape_interval = float(config["driver_scrape_interval"])
        except ValueError as e:
            _log.error("ERROR PROCESSING CONFIGURATION: {}".format(e))
            _log.error("Master driver scrape interval settings unchanged")
            # TODO: set a health status for the agent

        try:
            group_offset_interval = float(config["group_offset_interval"])
        except ValueError as e:
            _log.error("ERROR PROCESSING CONFIGURATION: {}".format(e))
            _log.error("Master driver group interval settings unchanged")
            # TODO: set a health status for the agent

        if self.scalability_test and action == "UPDATE":
            _log.info(
                "Running scalability test. Settings may not be changed without restart."
            )
            return

        if (self.driver_scrape_interval != driver_scrape_interval
                or self.group_offset_interval != group_offset_interval):
            self.driver_scrape_interval = driver_scrape_interval
            self.group_offset_interval = group_offset_interval

            _log.info("Setting time delta between driver device scrapes to  " +
                      str(driver_scrape_interval))

            #Reset all scrape schedules
            self.freed_time_slots.clear()
            self.group_counts.clear()
            for driver in self.instances.itervalues():
                time_slot = self.group_counts[driver.group]
                driver.update_scrape_schedule(time_slot,
                                              self.driver_scrape_interval,
                                              driver.group,
                                              self.group_offset_interval)
                self.group_counts[driver.group] += 1

        self.publish_depth_first_all = bool(config["publish_depth_first_all"])
        self.publish_breadth_first_all = bool(
            config["publish_breadth_first_all"])
        self.publish_depth_first = bool(config["publish_depth_first"])
        self.publish_breadth_first = bool(config["publish_breadth_first"])

        #Update the publish settings on running devices.
        for driver in self.instances.itervalues():
            driver.update_publish_types(self.publish_depth_first_all,
                                        self.publish_breadth_first_all,
                                        self.publish_depth_first,
                                        self.publish_breadth_first)
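For reference, a configuration exercising the settings read above might look like the following sketch; the keys are the ones configure_main accesses, while every value here is purely illustrative:

# Hypothetical contents for configure_main(config_name, "NEW", contents).
# All values are examples; remaining defaults come from self.default_config.
sample_config = {
    "max_open_sockets": 800,           # None lets the system limit apply
    "max_concurrent_publishes": 10000,
    "scalability_test": False,
    "scalability_test_iterations": 3,
    "driver_scrape_interval": 0.02,    # seconds between device scrapes
    "group_offset_interval": 0.0,
    "publish_depth_first_all": True,
    "publish_breadth_first_all": False,
    "publish_depth_first": False,
    "publish_breadth_first": False,
}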
Example #42
def get_normalized_time_offset(time_string):
    """Parses time_string and returns timeslot of the the value assuming 1 second publish interval
    and 0.1 second driver_scrape_interval."""
    ts = parse_timestamp_string(time_string)
    return ts.microsecond // 100000
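A quick illustration of the slot calculation, assuming the string is accepted by parse_timestamp_string:

# 350000 microseconds // 100000 -> slot 3
slot = get_normalized_time_offset("2017-01-01 08:00:00.350000")
assert slot == 3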
def test_success_forecast(cleanup_cache, weather, query_agent, locations):
    """
    Tests the basic functionality of a weather agent under optimal conditions.
    :param weather: instance of weather service to be tested
    :param query_agent: agent to leverage to use RPC calls
    """
    print(datetime.utcnow())
    query_data = query_agent.vip.rpc.call(identity, 'get_hourly_forecast',
                                          locations, hours=2).get(timeout=30)
    # print(query_data)
    assert len(query_data) == len(locations)
    for x in range(0, len(query_data)):
        location_data = query_data[x]
        assert (location_data.get("lat") and location_data.get("long")) or \
               (location_data.get("wfo") and location_data.get(
                   "x") and location_data.get("y"))
        results = location_data.get("weather_results")
        error = location_data.get("weather_error")
        if error and not results:
            if error.startswith("Remote API returned no data") \
                    or error.startswith("Remote API redirected request, but "
                                        "redirect failed") \
                    or error.startswith("Remote API returned invalid "
                                        "response") \
                    or error.startswith("API request failed with "
                                        "unexpected response"):
                assert True
            else:
                assert False
        if results:
            assert location_data.get("generation_time")
            for record in results:
                forecast_time = utils.parse_timestamp_string(record[0])
                assert isinstance(forecast_time, datetime)

    cache_data = query_agent.vip.rpc.call(identity, 'get_hourly_forecast',
                                          locations,
                                          hours=2).get(timeout=30)
    assert len(cache_data) == len(query_data)
    for x in range(0, len(cache_data)):
        query_location_data = query_data[x]
        cache_location_data = cache_data[x]
        assert cache_location_data.get(
            "generation_time") == query_location_data.get("generation_time")
        if cache_location_data.get("lat") and cache_location_data.get("long"):
            assert cache_location_data.get("lat") == query_location_data.get(
                "lat")
            assert cache_location_data.get("long") == query_location_data.get(
                "long")
        elif cache_location_data.get("wfo") and cache_location_data.get(
                "x") and cache_location_data.get("y"):
            assert cache_location_data.get("wfo") == query_location_data.get(
                "wfo")
            assert cache_location_data.get("x") == query_location_data.get("x")
            assert cache_location_data.get("y") == query_location_data.get("y")
        else:
            assert False
        if cache_location_data.get("weather_results"):

            query_weather_results = query_location_data.get("weather_results")
            cache_weather_results = cache_location_data.get("weather_results")
            for y in range(0, len(query_weather_results)):
                result = query_weather_results[y]
                cache_result = cache_weather_results[y]
                query_time, oldtz = utils.process_timestamp(result[0])
                query_time = utils.format_timestamp(query_time)
                assert query_time == cache_result[0]
                for key in cache_result[1]:
                    assert cache_result[1][key] == result[1][key]
        else:
            results = cache_location_data.get("weather_error")
            if results.startswith("Remote API returned no data") \
                    or results.startswith("Remote API redirected request, but "
                                          "redirect failed") \
                    or results.startswith("Remote API returned invalid "
                                          "response") \
                    or results.startswith("API request failed with unexpected "
                                          "response"):
                assert True
            else:
                assert False
Example #44
    def match_make_offer(self, peer, sender, bus, topic, headers, message):
        timestamp = utils.parse_timestamp_string(message[0])
        unformed_markets = message[1]
        decoded_message = "Timestamp: {}".format(timestamp)
        self.log_event("match_make_offer", peer, sender, bus, topic, headers, decoded_message)
        self.registrations.request_offers(timestamp, unformed_markets)
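The handler assumes a two-element market message; an illustrative payload (names and timestamp are examples only):

# Hypothetical pubsub payload handled by match_make_offer.
example_message = [
    "2017-01-01T08:00:00.000000",          # market-cycle timestamp string
    ["electric_market", "heat_market"],    # names of markets still needing offers
]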
Example #45
def parse_timestamp(timestamp):
    timestamp = utils.parse_timestamp_string(timestamp)
    timestamp = timestamp.replace(second=0, microsecond=0)
    timestamp = utils.format_timestamp(timestamp)
    return timestamp
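For example, truncation to the minute behaves as follows; a small sketch, assuming the input is accepted by utils.parse_timestamp_string and re-formatted by utils.format_timestamp:

# Seconds and microseconds are dropped before re-formatting.
rounded = parse_timestamp("2017-01-01 08:00:31.250000")
# rounded is expected to be "2017-01-01T08:00:00.000000" for this naive input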
Example #46
    def request_new_schedule(self, requester_id, task_id, priority, requests):
        """
        RPC method
        
        Requests one or more blocks of time on one or more devices.
        
        :param requester_id: Requester name. 
        :param task_id: Task name.
        :param priority: Priority of the task. Must be either "HIGH", "LOW", or "LOW_PREEMPT"
        :param requests: A list of time slot requests in the format described in `Device Schedule`_.
        
        :type requester_id: str
        :type task_id: str
        :type priority: str
        :type requests: list
        :returns: Request result
        :rtype: dict       
        
        :Return Values:
        
            The return values are described in `New Task Response`_.
        """
                     
        now = datetime.datetime.now()

        topic = topics.ACTUATOR_SCHEDULE_RESULT()
        headers = self._get_headers(requester_id, task_id=task_id)
        headers['type'] = SCHEDULE_ACTION_NEW

        try:
            if requests and isinstance(requests[0], basestring):
                requests = [requests]
            requests = [[r[0].strip('/'),
                         utils.parse_timestamp_string(r[1]),
                         utils.parse_timestamp_string(r[2])]
                        for r in requests]

        except StandardError as ex:
            return self._handle_unknown_schedule_error(ex, headers, requests)

        _log.debug("Got new schedule request: {}, {}, {}, {}".
                   format(requester_id, task_id, priority, requests))

        result = self._schedule_manager.request_slots(requester_id, task_id, requests, priority, now)
        success = SCHEDULE_RESPONSE_SUCCESS if result.success else SCHEDULE_RESPONSE_FAILURE

        # Dealing with success and other first world problems.
        if result.success:
            self._update_device_state_and_schedule(now)
            for preempted_task in result.data:
                preempt_headers = self._get_headers(preempted_task[0], task_id=preempted_task[1])
                preempt_headers['type'] = SCHEDULE_ACTION_CANCEL
                self.vip.pubsub.publish('pubsub', topic, headers=preempt_headers,
                                        message={'result': SCHEDULE_CANCEL_PREEMPTED,
                                                 'info': '',
                                                 'data': {'agentID': requester_id,
                                                          'taskID': task_id}})

        # If we are successful we do something else with the real result data
        data = result.data if not result.success else {}

        results = {'result': success,
                   'data': data,
                   'info': result.info_string}
        self.vip.pubsub.publish('pubsub', topic, headers=headers, message=results)

        return results
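A caller would typically reach this method over VIP RPC; a minimal sketch, in which the actuator identity, device path, and time window are all illustrative:

# Hypothetical RPC invocation of request_new_schedule from another agent.
result = agent.vip.rpc.call(
    'platform.actuator', 'request_new_schedule',
    'my-agent',                      # requester_id
    'task-1',                        # task_id
    'LOW',                           # priority: "HIGH", "LOW", or "LOW_PREEMPT"
    [['campus/building/device',      # one request: [device path, start, end]
      '2017-01-01 08:00:00',
      '2017-01-01 09:00:00']]).get(timeout=10)
# result is a dict of the form {'result': ..., 'data': ..., 'info': ...}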
Example #47
    def configure_main(self, config_name, action, contents):
        config = self.default_config.copy()
        config.update(contents)

        if action == "NEW":
            try:
                self.max_open_sockets = config["max_open_sockets"]
                if self.max_open_sockets is not None:
                    max_open_sockets = int(self.max_open_sockets)
                    configure_socket_lock(max_open_sockets)
                    _log.info("maximum concurrently open sockets limited to " + str(max_open_sockets))
                elif self.system_socket_limit is not None:
                    max_open_sockets = int(self.system_socket_limit * 0.8)
                    _log.info("maximum concurrently open sockets limited to " + str(max_open_sockets) +
                              " (derived from system limits)")
                    configure_socket_lock(max_open_sockets)
                else:
                    configure_socket_lock()
                    _log.warn("No limit set on the maximum number of concurrently open sockets. "
                              "Consider setting max_open_sockets if you plan to work with 800+ modbus devices.")

                self.max_concurrent_publishes = config['max_concurrent_publishes']
                max_concurrent_publishes = int(self.max_concurrent_publishes)
                if max_concurrent_publishes < 1:
                    _log.warn("No limit set on the maximum number of concurrent driver publishes. "
                              "Consider setting max_concurrent_publishes if you plan to work with many devices.")
                else:
                    _log.info("maximum concurrent driver publishes limited to " + str(max_concurrent_publishes))
                configure_publish_lock(max_concurrent_publishes)

                self.scalability_test = bool(config["scalability_test"])
                self.scalability_test_iterations = int(config["scalability_test_iterations"])

                if self.scalability_test:
                    self.waiting_to_finish = set()
                    self.test_iterations = 0
                    self.test_results = []
                    self.current_test_start = None

            except ValueError as e:
                _log.error("ERROR PROCESSING STARTUP CRITICAL CONFIGURATION SETTINGS: {}".format(e))
                _log.error("MASTER DRIVER SHUTTING DOWN")
                sys.exit(1)

        else:
            if self.max_open_sockets != config["max_open_sockets"]:
                _log.info("The master driver must be restarted for changes to the max_open_sockets setting to take effect")

            if self.max_concurrent_publishes != config["max_concurrent_publishes"]:
                _log.info("The master driver must be restarted for changes to the max_concurrent_publishes setting to take effect")

            if self.scalability_test != bool(config["scalability_test"]):
                if not self.scalability_test:
                    _log.info(
                        "The master driver must be restarted with scalability_test set to true in order to run a test.")
                if self.scalability_test:
                    _log.info(
                        "A scalability test may not be interrupted. Restarting the driver is required to stop the test.")
            try:
                if self.scalability_test_iterations != int(config["scalability_test_iterations"]) and self.scalability_test:
                    _log.info(
                        "A scalability test must be restarted for the scalability_test_iterations setting to take effect.")
            except ValueError:
                pass

        #update override patterns
        if self._override_patterns is None:
            try:
                values = self.vip.config.get("override_patterns")
                values = jsonapi.loads(values)

                if isinstance(values, dict):
                    self._override_patterns = set()
                    for pattern, end_time in values.items():
                        #check the end_time
                        now = utils.get_aware_utc_now()
                        #If end time is indefinite, set override with indefinite duration
                        if end_time == "0.0":
                            self._set_override_on(pattern, 0.0, from_config_store=True)
                        else:
                            end_time = utils.parse_timestamp_string(end_time)
                            # If end time > current time, set override with new duration
                            if end_time > now:
                                delta = end_time - now
                                self._set_override_on(pattern, delta.total_seconds(), from_config_store=True)
                else:
                    self._override_patterns = set()
            except KeyError:
                self._override_patterns = set()
            except ValueError:
                _log.error("Override patterns is not set correctly in config store")
                self._override_patterns = set()
        try:
            driver_scrape_interval = float(config["driver_scrape_interval"])
        except ValueError as e:
            _log.error("ERROR PROCESSING CONFIGURATION: {}".format(e))
            _log.error("Master driver scrape interval settings unchanged")
            # TODO: set a health status for the agent

        try:
            group_offset_interval = float(config["group_offset_interval"])
        except ValueError as e:
            _log.error("ERROR PROCESSING CONFIGURATION: {}".format(e))
            _log.error("Master driver group interval settings unchanged")
            # TODO: set a health status for the agent

        if self.scalability_test and action == "UPDATE":
            _log.info("Running scalability test. Settings may not be changed without restart.")
            return

        if (self.driver_scrape_interval != driver_scrape_interval or
                    self.group_offset_interval != group_offset_interval):
            self.driver_scrape_interval = driver_scrape_interval
            self.group_offset_interval = group_offset_interval

            _log.info("Setting time delta between driver device scrapes to  " + str(driver_scrape_interval))

            #Reset all scrape schedules
            self.freed_time_slots.clear()
            self.group_counts.clear()
            for driver in self.instances.itervalues():
                time_slot = self.group_counts[driver.group]
                driver.update_scrape_schedule(time_slot, self.driver_scrape_interval,
                                              driver.group, self.group_offset_interval)
                self.group_counts[driver.group] += 1

        self.publish_depth_first_all = bool(config["publish_depth_first_all"])
        self.publish_breadth_first_all = bool(config["publish_breadth_first_all"])
        self.publish_depth_first = bool(config["publish_depth_first"])
        self.publish_breadth_first = bool(config["publish_breadth_first"])

        #Update the publish settings on running devices.
        for driver in self.instances.itervalues():
            driver.update_publish_types(self.publish_depth_first_all,
                                        self.publish_breadth_first_all,
                                        self.publish_depth_first,
                                        self.publish_breadth_first)
Example #48
    def match_reservation(self, peer, sender, bus, topic, headers, message):
        timestamp = utils.parse_timestamp_string(message[0])
        decoded_message = "Timestamp: {}".format(timestamp)
        self.log_event("match_reservation", peer, sender, bus, topic, headers, decoded_message)
        self.registrations.request_reservations(timestamp)
Example #49
    def query_historian(self, topic, start=None, end=None, agg_type=None,
                        agg_period=None, skip=0, count=None,
                        order="FIRST_TO_LAST"):

        # # Verify that we have initialized through the historian setup code
        # # before we do anything else.
        # if not self._initialized:
        #     self.historian_setup()
        #     if not self._initialized:
        #         return {}

        if count is not None:
            try:
                count = int(count)
            except ValueError:
                count = 20
            else:
                # protect the querying of the database limit to 500 at a time.
                if count > 500:
                    count = 500

        # Final results that are sent back to the client.
        results = {}

        # A list or a single topic is now accepted for the topic parameter.
        if not isinstance(topic, list):
            topics = [topic]
        else:
            # Copy elements into topic list
            topics = [x for x in topic]

        values = defaultdict(list)
        metadata = {}
        table_name = "{}.data".format(self._schema)
        client = self.get_client(self._host, self._error_trace)
        cursor = client.cursor()

        for topic in topics:
            query, args = self._build_single_topic_select_query(
                start, end, agg_type, agg_period, skip, count, order,
                table_name, topic)

            cursor.execute(query, args)

            for _id, ts, value, meta in cursor.fetchall():
                try:
                    value = float(value)
                except ValueError:
                    pass

                values[topic].append(
                    (
                        utils.format_timestamp(
                            utils.parse_timestamp_string(ts)),
                        value
                    )
                )
                if len(topics) == 1:
                    metadata = meta
        cursor.close()
        client.close()

        if len(topics) > 1:
            results['values'] = values
            results['metadata'] = {}
        elif len(topics) == 1:  # return the list from the single topic
            results['values'] = values[topics[0]]
            results['metadata'] = metadata

        return results
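Illustratively, the single-topic case returns a flat list of (timestamp, value) pairs; a hedged sketch of an RPC call and the result shape (the historian identity, the exported method name 'query', the topic, and the values are all assumptions for the example):

# Hypothetical query against a historian exposing this method via RPC.
result = agent.vip.rpc.call(
    'platform.historian', 'query',
    topic='campus/building/device/temperature',
    start='2017-01-01T00:00:00',
    count=2,
    order='FIRST_TO_LAST').get(timeout=10)
# Expected shape for a single topic:
# {'values': [('2017-01-01T00:00:00.000000', 72.1),
#             ('2017-01-01T00:01:00.000000', 72.3)],
#  'metadata': {'units': 'F', 'type': 'float', 'tz': 'UTC'}}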