def update_asset_devices_table():
    """Use this method to fill out the asset devices table that corresponds devices to the assets that are related to them. The idea here is to use ASSETs to represent spaces and the ThingsBoard relation property to associate DEVICEs to those
    assets as a way to represent the devices currently installed and monitoring that space
    @:raise mysql_utils.MySQLDatabaseException - For problems related with the database access
    @:raise utils.ServiceEndpointException - For issues related with the remote API call
    @:raise utils.AuthenticationException - For problems related with the authentication credentials used"""

    asset_devices_log = ambi_logger.get_logger(__name__)

    asset_devices_table_name = proj_config.mysql_db_tables['asset_devices']
    assets_table_name = proj_config.mysql_db_tables['tenant_assets']
    devices_table_name = proj_config.mysql_db_tables['devices']

    # Fire up the database access objects
    database_name = user_config.access_info['mysql_database']['database']
    cnx = mysql_utils.connect_db(database_name=database_name)
    outer_select_cursor = cnx.cursor(buffered=True)
    inner_select_cursor = cnx.cursor(buffered=True)

    mysql_utils.reset_table(table_name=asset_devices_table_name)

    # First get a list of all the assets supported so far. Refresh the assets database table first of all
    mysql_asset_controller.update_tenant_assets_table()
    # And the devices table too, since I need data from there later on
    mysql_device_controller.update_devices_table()

    # And grab all asset ids, names and types (I need those for later)
    sql_select = """SELECT id, name, type FROM """ + str(
        assets_table_name) + """;"""

    # Execute the statement with the select cursor
    outer_select_cursor = mysql_utils.run_sql_statement(
        cursor=outer_select_cursor, sql_statement=sql_select, data_tuple=())

    # Check if any results came back
    if outer_select_cursor.rowcount <= 0:
        error_msg = "Unable to get any results from {0}.{1} with '{2}'...".format(
            str(database_name), str(assets_table_name),
            str(outer_select_cursor.statement))
        asset_devices_log.error(error_msg)
        outer_select_cursor.close()
        inner_select_cursor.close()
        cnx.close()
        raise mysql_utils.MySQLDatabaseException(message=error_msg)

    # Got some results. Process them then
    else:
        # For each valid ASSET Id found in this table, run a query in the ThingsBoard side of things for all DEVICEs that have a relation to that asset and send it to the database
        asset_info = outer_select_cursor.fetchone()

        # Set the common used parameters for the entity-relation call
        entityType = "ASSET"
        relationTypeGroup = "COMMON"
        direction = "FROM"

        # Run this while there are still asset ids to process
        while asset_info:

            # Query for related devices
            api_response = tb_entity_relation_controller.findByQuery(
                entityType=entityType,
                entityId=asset_info[0],
                relationTypeGroup=relationTypeGroup,
                direction=direction)

            # Translate the PostgreSQL-style terms in the response into their Python equivalents and evaluate the result into a list of relation dictionaries
            relation_list = eval(
                utils.translate_postgres_to_python(api_response.text))

            # Now lets format this info accordingly and send it to the database
            for relation in relation_list:
                # Create a dictionary to store all the data to send to the database. For now it's easier to manipulate one of these and cast it to a tuple just before executing the statement
                data_dict = {
                    "fromEntityType": relation["from"]["entityType"],
                    "fromId": relation["from"]["id"],
                    "fromName": asset_info[1],
                    "fromType": asset_info[2],
                    "toEntityType": relation["to"]["entityType"],
                    "toId": relation["to"]["id"],
                    "toName": None,
                    "toType": None,
                    "relationType": relation["type"],
                    "relationGroup": relation["typeGroup"],
                }

                # As always, take care with the stupid 'description'/'additionalInfo' issue...
                if relation["additionalInfo"] is not None:
                    try:
                        # Try to get a 'description' from the returned dictionary from the 'additionalInfo' sub dictionary
                        data_dict["description"] = relation["additionalInfo"][
                            "description"]
                    # If the field wasn't set, instead of crashing the code
                    except KeyError:
                        # Simply set this field to None and move on with it...
                        data_dict["description"] = None
                else:
                    data_dict["description"] = None

                # And now to get the data to use in the INSERT statement. For that I need to do a quick SELECT first, since the name and type of the 'to' entity live in a different table
                if data_dict['toEntityType'] == 'DEVICE':
                    sql_select = """SELECT name, type FROM """ + str(
                        devices_table_name) + """ WHERE id = %s;"""
                elif data_dict['toEntityType'] == 'ASSET':
                    sql_select = """SELECT name, type FROM """ + str(
                        assets_table_name) + """ WHERE id = %s;"""
                else:
                    # Unexpected entity type on the 'to' side: skip this relation rather than reusing a stale SELECT statement from a previous iteration
                    asset_devices_log.warning(
                        "Unexpected toEntityType '{0}' found in a relation. Skipping it..."
                        .format(str(data_dict['toEntityType'])))
                    continue

                data_tuple = (relation['to']['id'], )

                # NOTE: A second cursor is needed here because the outer select cursor still has unprocessed results from the previous SELECT execution, and reusing it would discard those
                inner_select_cursor = mysql_utils.run_sql_statement(
                    cursor=inner_select_cursor,
                    sql_statement=sql_select,
                    data_tuple=data_tuple)

                # Check if any results were returned. Use them if so; otherwise leave the 'toName'/'toType' fields as None. In this case there's no point in raising Exceptions if I can't find the actual name or type of the related device
                if inner_select_cursor.rowcount > 0:
                    # I got the data I was looking for from the devices table
                    result = inner_select_cursor.fetchone()
                    data_dict["toName"] = result[0]
                    data_dict["toType"] = result[1]

                # Write the data in the database table
                mysql_utils.add_data_to_table(
                    table_name=asset_devices_table_name, data_dict=data_dict)

            # Finished with the current asset. Fetch the next one and repeat the cycle if it's not None
            asset_info = outer_select_cursor.fetchone()

        # Done with everything, I believe. Close the database access objects and carry on
        outer_select_cursor.close()
        inner_select_cursor.close()
        cnx.close()
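
# A minimal usage sketch (an assumption, not from the original source): the method takes no
# arguments, so a periodic refresh of the asset-devices mapping could look like this:
#
#     import time
#
#     while True:
#         update_asset_devices_table()
#         time.sleep(3600)  # rebuild the mapping once per hour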
# Code example #2

def getLatestTimeseries(device_name, timeseries_keys_filter=None):
    """
    This method is analogous to the previous one, i.e., it also retrieves Timeseries data that is associated to the device identified by 'device_name', but in this particular case only one timestamp/value pair is returned for each of the device's
    measurements, namely the last one recorded by the ThingsBoard installation that oversees that device. This method is very useful to:
    1. Determine if a device is working by retrieving the last recorded data.
    2. Determine the timeseries keys associated to the device, as well as the last timestamp associated to them.
    3. Determine the most recent end_date possible for that device in a direct way - once this parameter is known, a more complete and insightful getTimeseries call can then be placed.
    @:param device_name (str) - The name of the device to which the latest associated timeseries should be retrieved by this method.
    @:param timeseries_keys_filter (list of str) - A list with the names of the timeseries keys to be returned by the remote API. If a valid list is provided, only data for the keys specified in this list are going to be returned. This method
    validates this list against any associated timeseriesKeys for the device: mismatched elements from this list are to be ignored.
    @:raise utils.InputValidationException - If any of the inputs fails initial validation
    @:raise utils.ServiceEndpointException - If errors occur when invoking any remote API services
    @:raise mysql_utils.MySQLDatabaseException - If errors occur when accessing the database
    @:return None if the API returns an empty set, otherwise returns a dictionary with the following format:
    device_data =
    {
        "timeseriesKey_1": [
            {
              "ts": int,
              "value": str
            }
        ],
        "timeseriesKey_2": [
            {
              "ts": int,
              "value": str
            }
        ],
        ...
        "timeseriesKey_N": [
            {
                "ts": int,
                "value": str
            }
        ]
    }
    """
    log = ambi_logger.get_logger(__name__)

    utils.validate_input_type(device_name, str)
    if timeseries_keys_filter:
        utils.validate_input_type(timeseries_keys_filter, list)

        for ts_key in timeseries_keys_filter:
            utils.validate_input_type(ts_key, str)

    # Grab the device credentials at this point. If the method returns anything (not None), then assume that it's the following tuple: (entityType, entityId, timeseriesKeys_list)
    device_cred = mysql_device_controller.get_device_credentials(
        device_name=device_name)

    if device_cred is None:
        error_msg = "Could not get valid credentials for device '{0}'. Cannot continue...".format(
            str(device_name))
        log.error(error_msg)
        raise mysql_utils.MySQLDatabaseException(message=error_msg)

    # Validate the timeseries keys, if any were provided
    keys = None
    if timeseries_keys_filter:
        valid_keys = []
        for ts_filter_key in timeseries_keys_filter:
            # Filter out only the valid keys, i.e., the ones with a correspondence in the list returned from the database
            if ts_filter_key in device_cred[2]:
                valid_keys.append(ts_filter_key)

        # Check if at least one of the proposed keys made it to the valid list. If not, default to the list returned from the database (if this one is also not empty)
        if not len(valid_keys):
            log.warning(
                "Could not validate any of the filter keys ({0}) provided as argument!"
                .format(str(timeseries_keys_filter)))

            # Check if the timeseriesKeys element returned from the device credentials request was a single element list with an empty string inside it
            if len(device_cred[2]) == 1 and device_cred[2][0] == "":
                log.warning(
                    "The database didn't return any valid set of timeseriesKeys. Omitting this argument in the API call"
                )
                # Don't do anything else. The 'keys' parameter is already None. Keep it as that then
        else:
            keys = ",".join(valid_keys)

    # I have all I need to execute the remote API call
    service_endpoint = "/api/plugins/telemetry/{0}/{1}/values/timeseries".format(
        str(device_cred[0]), str(device_cred[1]))

    # Add the keys filter, if it was provided
    if keys is not None:
        service_endpoint += "?keys={0}".format(str(keys))

    # Service endpoint is done. Grab the service calling dictionary
    service_dict = utils.build_service_calling_info(
        auth_token=mac.get_auth_token(user_type='tenant_admin'),
        service_endpoint=service_endpoint)

    # Execute the remote call finally
    try:
        response = requests.get(url=service_dict['url'],
                                headers=service_dict['headers'])
    except (requests.exceptions.ConnectionError,
            requests.exceptions.ConnectTimeout):
        error_msg = "Unable to establish a connection with {0}...".format(
            str(service_dict['url']))
        log.error(error_msg)
        raise utils.ServiceEndpointException(message=error_msg)

    # Check the HTTP status code in the response
    if response.status_code != 200:
        error_msg = "Request unsuccessful: Received HTTP {0} with message {1}!".format(
            str(response.status_code), str(eval(response.text)['message']))
        log.error(error_msg)
        raise utils.ServiceEndpointException(message=error_msg)
    else:
        # Send back the response already in dictionary form
        return eval(response.text)
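
# Hedged usage sketch: the device name and timeseries key below are illustrative assumptions,
# not values from the original source.
#
#     latest = getLatestTimeseries(device_name='Thermometer A-1',
#                                  timeseries_keys_filter=['temperature'])
#     if latest:
#         for ts_key, samples in latest.items():
#             # Each entry is a single-element list holding the last recorded pair
#             print(ts_key, samples[0]['ts'], samples[0]['value'])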
# Code example #3

def add_table_data(data_dict, table_name):
    """
    Method that abstracts the insertion of data into the provided database table. The method maps the provided data dictionary to the available columns in the table identified by table_name. The method uses the table data as the main reference, i.e.,
    it only writes data whose key in the data dictionary has a direct correspondence to a table column in the database. If the data dictionary has more keys/items than available columns, a log warning is issued about it, but the method
    carries on writing all the available data.
    @:param data_dict (dict) - A dict structure, i.e., a key-value arrangement with the data to be added/updated into the database table. IMPORTANT: The table column names were prepared such that there's a one-to-one equivalence between them and
    the expected keys in the data dictionary.
    @:param table_name (str) - The name of the database table where the data dict has to be written into.
    @:raise utils.InputValidationException - If any of the inputs fails initial validation.
    @:raise mysql_utils.MySQLDatabaseException - If any issues occur with the database accesses.
    @:return result (bool) - If the database addition/update was performed successfully, this method returns True. Otherwise, the appropriate exception is raised with the details on why it was raised in the first place.
    """
    log = ambi_logger.get_logger(__name__)

    # Validate inputs
    utils.validate_input_type(data_dict, dict)
    utils.validate_input_type(table_name, str)

    # Prepare the database access objects
    database_name = user_config.access_info['mysql_database']['database']
    cnx = mysql_utils.connect_db(database_name=database_name)
    select_cursor = cnx.cursor(buffered=True)
    change_cursor = cnx.cursor(buffered=True)

    # First, check if the table exists
    sql_select = """SHOW tables FROM """ + str(database_name) + """;"""

    select_cursor = mysql_utils.run_sql_statement(cursor=select_cursor,
                                                  sql_statement=sql_select,
                                                  data_tuple=())

    records = select_cursor.fetchall()

    # The result of the previous fetchall command is a list of one element tuples, hence the name whose existence I want to verify is wrapped in a one element tuple as well. The alternative was to flatten the previous list into a
    # list of plain strings, but it is way simpler this way
    if (table_name, ) not in records:
        error_msg = "The table name provided: {0} doesn't exist yet in database {1}. Cannot continue.".format(
            str(table_name), str(database_name))
        log.error(error_msg)
        raise utils.InputValidationException(message=error_msg)

    # Okay, the table exists in the database. Get all its columns into a list
    column_list = mysql_utils.get_table_columns(database_name=database_name,
                                                table_name=table_name)

    # And create the standard INSERT statement from it
    sql_insert = mysql_utils.create_insert_sql_statement(
        column_list=column_list, table_name=table_name)
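
    # Presumably (an assumption about mysql_utils.create_insert_sql_statement, not confirmed
    # by the original source), for a table with columns (id, name, type) the generated
    # statement has the parameterized form:
    #
    #     INSERT INTO <table_name> (id, name, type) VALUES (%s, %s, %s);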

    # Build the respective data tuple by going through all column names and checking if there is a corresponding key in the data dictionary
    data_list = []

    for i in range(0, len(column_list)):
        try:
            # First, try to retrieve a value into the data list by doing a direct retrieval from the data dictionary using the column name as key
            data_list.append(data_dict[column_list[i]])

        # If the current column name doesn't have a matching key in the data dictionary, catch the expected Exception
        except KeyError:
            # And replace the missing value with a None since, by default, all table columns were created in a way where they hold such value
            # But warn the user first
            log.warning(
                "Didn't find any '{0}' keys in the data dictionary provided. Setting the {0} column in {1}.{2} to NULL"
                .format(str(column_list[i]), str(database_name),
                        str(table_name)))
            # And set the value then
            data_list.append(None)

    # Done. Proceed with the INSERT
    try:
        change_cursor = mysql_utils.run_sql_statement(
            cursor=change_cursor,
            sql_statement=sql_insert,
            data_tuple=tuple(data_list))

        # Check the outcome of the previous execution. If no columns were changed in the previous statement, raise a 'Duplicate entry' Exception to trigger an UPDATE instead
        if change_cursor.rowcount == 0:
            # No changes to the database table detected. Trigger an UPDATE then
            raise mysql_utils.MySQLDatabaseException(
                message=proj_config.double_record_msg)
        elif change_cursor.rowcount == 1:
            # In this case, all went well. Close the database access objects, commit the changes to the database, inform the user of this and move on
            log.info("Successfully added a new record to {0}.{1}".format(
                str(database_name), str(table_name)))
            cnx.commit()
            select_cursor.close()
            change_cursor.close()
            cnx.close()
            return True

    # Watch out for the typical "Duplicate entry" exception
    except mysql_utils.MySQLDatabaseException as mse:
        if proj_config.double_record_msg in mse.message:
            trigger_column_list = mysql_utils.get_trigger_columns(
                table_name=table_name)

            # Cool. Use this data to get the respective UPDATE statement
            sql_update = mysql_utils.create_update_sql_statement(
                column_list=column_list,
                table_name=table_name,
                trigger_column_list=trigger_column_list)

            # And complete the existing data list by appending to it the values corresponding to the elements in the trigger list
            for trigger_column_name in trigger_column_list:
                try:
                    data_list.append(data_dict[trigger_column_name])
                except KeyError:
                    error_msg = "The value for the trigger column '{0}' cannot be found among the data dictionary elements! Cannot continue!".format(
                        str(trigger_column_name))
                    log.error(error_msg)
                    select_cursor.close()
                    change_cursor.close()
                    cnx.close()
                    raise mysql_utils.MySQLDatabaseException(message=error_msg)

            # Done. Run the UPDATE statement, still looking for duplicate records
            try:
                change_cursor = mysql_utils.run_sql_statement(
                    cursor=change_cursor,
                    sql_statement=sql_update,
                    data_tuple=tuple(data_list))

                # Check the UPDATE execution status
                if change_cursor.rowcount == 0:
                    # If nothing happened, the record already exists in the database. Give a bit of heads up and move on
                    log.warning(
                        "A record with data:\n{0}\n already exists in {1}.{2}. Nothing more to do..."
                        .format(str(data_list), str(database_name),
                                str(table_name)))
                    select_cursor.close()
                    change_cursor.close()
                    cnx.close()
                    return False
                # Else, if more than one record was modified
                if change_cursor.rowcount != 1:
                    error_msg = "Could not execute\n{0}\nin {1}.{2}. Cannot continue..".format(
                        str(change_cursor.statement), str(database_name),
                        str(table_name))
                    log.error(error_msg)
                    select_cursor.close()
                    change_cursor.close()
                    cnx.close()
                    raise mysql_utils.MySQLDatabaseException(message=error_msg)
                else:
                    info_msg = "Updated record with  successfully in {0}.{1}".format(
                        str(database_name), str(table_name))
                    log.info(info_msg)
                    cnx.commit()
                    select_cursor.close()
                    change_cursor.close()
                    cnx.close()
                    return True
            except mysql_utils.MySQLDatabaseException as mse:
                # If a duplicate result was still found with the last UPDATE execution
                if proj_config.double_record_msg in mse.message:
                    # Inform the user with a warning message
                    warn_msg = "The record with "
                    for i in range(0, len(trigger_column_list) - 1):
                        warn_msg += str(trigger_column_list[i]) + " = " + str(
                            data_dict[trigger_column_list[i]]) + ", "

                    warn_msg += str(trigger_column_list[-1]) + " = " + str(data_dict[trigger_column_list[-1]]) + " already exists in {0}.{1}. Nothing to do then..." \
                        .format(str(database_name), str(table_name))

                    log.warning(warn_msg)

                    # Close out all database access objects
                    select_cursor.close()
                    change_cursor.close()
                    cnx.close()

                    # Nothing else that can be done here. Move out
                    return True
                else:
                    # Some other Exception was raised then
                    select_cursor.close()
                    change_cursor.close()
                    cnx.close()

                    # Forward the Exception then
                    raise mse
        else:
            select_cursor.close()
            change_cursor.close()
            cnx.close()
            # Something else must have happened then. Keep on raising the Exception
            raise mse
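
# Hedged usage sketch: the table and column names are illustrative assumptions. The method
# behaves like an upsert: it INSERTs first and falls back to an UPDATE when a duplicate
# record is detected via the table's trigger columns.
#
#     added = add_table_data(data_dict={'id': 'abc-123',
#                                       'name': 'Thermometer A-1',
#                                       'type': 'thermometer'},
#                            table_name='tb_devices')
#     # 'added' is True on a successful INSERT/UPDATE; check the method body for the
#     # exact semantics when an identical record is already present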
# Code example #4

def getTimeseries(device_name,
                  end_date,
                  start_date=None,
                  time_interval=None,
                  interval=None,
                  limit=100,
                  agg=None,
                  timeseries_keys_filter=None):
    """This method is the real deal, at least to establish a base methodology to retrieve hard data from the remote API server. Unlike other API based methods so far, this one requires some data to be present in the MySQL server already because
    that is where the actual method call input data is going to come from. The remote API service that retrieves the requested data requires 5 mandatory elements (the optional arguments are explicit in the calling signature of this method where
    they are set to their default values already, in case that down the line there is a need to use them): entityType, entityId, keys, startTs and endTs. The first 3 parameters are going to be retrieved with a call to the MySQL
    thingsboard_devices_table and the timestamp ones are going to be determined from the triplet start_time (mandatory), ent_time and time_interval (only one of these is required). The method returns a dictionary with a list of timestamp,
    value pairs that can or cannot be limited by the limit value
    @:type user_types allowed for this service: TENANT_ADMIN, CUSTOMER_USER
    @:param device_name (str) - The name of the device to retrieve data from (e.g., 'Thermometer A-1', 'Water Meter A-1', etc... whatever the string used when registering the device in the ThingsBoard system). This value is certainly easier to
    retain and/or memorize for the user than the id string, for instance.
    @:param end_date (datetime.datetime) - A datetime.datetime object, i.e., in the format YYYY-MM-DD hh:mm:ss but that belongs to the datetime.datetime class. This is the latest value of the interval and, to avoid invalid dates in the input (
    like future dates and such), this one is mandatory. The interval to be considered is going to be defined by either start_date (earliest) -> end_date (latest) or end_date - time_interval (in seconds) -> end_date, but one of the next two input
    arguments has to be provided.
    @:param start_date (datetime.datetime) - A datetime.datetime object delimiting the earliest point of the time interval for data retrieval
    @:param time_interval (int) - An interval, in seconds, to be subtracted from the end_date datetime object in order to define the time window to return data from
    @:param interval (int) - This is an OPTIONAL API side only parameter whose use still eludes me... so far I've tried to place calls to the remote service with all sorts of values in this field and I'm still to discover any influence of it on
    the returned results. NOTE: My initial assumption was that it could act as an API side version of my time_interval. Yet, that is not the case because the API requires both the end and start timestamps to be provided by default.
    @:param limit (int) - The number of results to return in the request. Device data can be quite a lot to process and that's why this parameter, though optional, is set to 100 by default. Two things with this value: though the API doesn't
    explicitly say so, it doesn't like limit <= 0. It doesn't return an error per se, but instead the service gets stuck until eventually an HTTP 503 - Service Unavailable is thrown. As such, I'm going to validate this input accordingly.
    Also, unlike other limit parameters so far, there's no indication in the response structure whether the number of results returned was limited by this value or by the time interval defined. To provide the user with more helpful information in this
    regard, this method is going to count the number of returned results and, if they do match the limit value provided, warn the user about it.
    @:param agg (str) - No idea what this one does either... The API testing interface has it set to NONE by default, though it is an optional parameter whose effect on the returned results is still yet to be understood. All I know so far is that the
    remote API expects a string in it
    @:param timeseries_keys_filter (list of str) - A list of strings with the keys to be returned from the remote API. Some devices contain multiple sensors, which means that there are going to be multiple records from different variables under
    the same device ID. To limit the returned results to a subset of all parameters, provide a list in this argument with the correct names to limit the entries to be returned. Omitting this parameter (which defaults to None) returns all
    timeseries keys under the provided device ID
    @:return result_dict (dict) - The returned results are processed into a dictionary that maps each timeseries key to a list of {'ts': int, 'value': str} pairs (the exact format is illustrated at the end of this method).
    @:raise utils.InputValidationException - If any of the inputs provided fails validation
    @:raise utils.ServiceEndpointException - If something goes wrong with any of the external service calls to the remote API executed in the method
    @:raise mysql_utils.MySQLDatabaseException - For errors derived from the MySQL database accesses
    @:raise Exception - For any other detected errors during the method's execution
    """
    timeseries_log = ambi_logger.get_logger(__name__)

    # Before moving forward, check if at least one of the start_time, time_interval inputs was provided. NOTE: If both inputs are present, i.e., not None, the method validates both and if both are valid it prioritizes start_time over
    # time_interval. If one of them happens to be invalid, the method execution is not stopped but the user gets warned (through the logger) about this and how the method is going to be operated. But at this stage, I'm only moving forward if I
    # have the conditions to setup a valid time window for the API request
    if not start_date and not time_interval:
        error_msg = "Please provide at least one valid start_time (datetime.datetime) or a time_interval (int). Cannot compute a time window for data retrieval otherwise.."
        timeseries_log.error(error_msg)
        raise utils.InputValidationException(message=error_msg)

    # Time to validate the inputs
    utils.validate_input_type(device_name, str)
    utils.validate_input_type(end_date, datetime.datetime)
    # Limit is OPTIONAL but, because of what is explained in this method's docstring, I need this value to be sort of mandatory. This verification, given that the input is already set with a decent default value, is just to protect the
    # method's execution against a user setting it to None for whatever reason
    utils.validate_input_type(limit, int)
    if start_date:
        utils.validate_input_type(start_date, datetime.datetime)
    if time_interval:
        utils.validate_input_type(time_interval, int)
    if interval:
        utils.validate_input_type(interval, int)
    if agg:
        utils.validate_input_type(agg, str)

    # Validate the argument against the list type
    if timeseries_keys_filter:
        utils.validate_input_type(timeseries_keys_filter, list)

        if len(timeseries_keys_filter) <= 0:
            timeseries_log.warning(
                "Invalid timeseries keys filter provided: empty list. This filter is going to be ignored"
            )
            timeseries_keys_filter = None
        else:
            # And each of its elements against the expected str type
            for timeseries in timeseries_keys_filter:
                utils.validate_input_type(timeseries, str)

    # Data type validation done. Now for the functional validations
    error_msg = None
    if limit <= 0:
        error_msg = "Invalid limit value: {0}. Please provide a greater than zero integer for this argument.".format(
            str(limit))
    elif end_date > datetime.datetime.now():
        error_msg = "Invalid end_date provided: {0}! The date hasn't happened yet (future date). Please provide a valid datetime value!".format(
            str(end_date))
    elif start_date and not time_interval and start_date >= end_date:
        error_msg = "Invalid start_date! The start_date provided ({0}) is the same as or newer than the end_date ({1}): invalid time window defined!".format(
            str(start_date), str(end_date))
    elif time_interval and not start_date and time_interval <= 0:
        error_msg = "Invalid time interval ({0})! Please provide a greater than zero value for this argument (the number of seconds to subtract from end_date).".format(
            str(time_interval))
    elif start_date and time_interval and start_date >= end_date and time_interval <= 0:
        error_msg = "Both the start_date and time_interval arguments provided are invalid!\nThe start_date provided ({0}) is newer than the end_date indicated ({1}) and the time_interval has an invalid value ({2}).\n" \
                    "Please provide a valid (older) start_date or a valid (greater than 0) time_interval".format(str(start_date), str(end_date), str(time_interval))

    if error_msg:
        timeseries_log.error(error_msg)
        raise utils.InputValidationException(message=error_msg)

    # And now for the cases where both valid start_time and time_interval were provided. The previous validation bundle made sure that, if only one of these two parameters was provided, it was valid. If I got to this point I can have both of these
    # parameter set to valid inputs but I need to warn the user that I'm only going to use one to define the time window
    if start_date and time_interval:
        timeseries_log.warning(
            "Both the start_date and time_interval arguments provided are valid, but only start_date is going to be considered moving on. Set this argument to None/Invalid to use the time_interval instead"
        )
        # So, if I'm dropping the time_interval, I need to signal this somehow moving forward:
        time_interval = None

    # Retrieve the device's credentials using the appropriate method
    device_cred = mysql_device_controller.get_device_credentials(
        device_name=device_name)

    # Check if a valid set of credentials was found
    if device_cred is None:
        error_msg = "Unable to retrieve a set of valid credentials to device '{0}'".format(
            str(device_name))
        timeseries_log.error(error_msg)
        raise mysql_utils.MySQLDatabaseException(message=error_msg)

    # The first 3 elements that I need to build the service endpoint are valid and retrieved. Let's deal with the time window then. The service endpoint requires the limits of this window (startTs, endTs) to be passed in that weird POSIX
    # timestamp-like format that the ThingsBoard PostGres database adopted, i.e., a 13 digit number with no decimal point (10 digits for the integer part + 3 for the milliseconds value... but with the decimal point omitted...). Fortunately I've
    # written the 'translate' functions already for this situation
    end_ts = mysql_utils.convert_datetime_to_timestamp_tb(end_date)

    # If the other end is defined by the start_time datetime.datetime object
    if start_date:
        # Easy
        start_ts = mysql_utils.convert_datetime_to_timestamp_tb(start_date)

    # If I got to this point in the code, given the brutality of validations undertaken so far, I can only get here with start_date = None and something valid in time_interval. Proceed accordingly
    else:
        # I need to convert this interval to a timedelta object to be able to subtract it from the end_date one
        time_interval = datetime.timedelta(seconds=int(time_interval))
        start_time = end_date - time_interval
        start_ts = mysql_utils.convert_datetime_to_timestamp_tb(start_time)
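
    # For reference, a hedged example of the conversion (this assumes convert_datetime_to_timestamp_tb
    # works on UTC datetimes; the exact timezone handling lives in mysql_utils):
    #
    #     datetime.datetime(2020, 1, 1, 12, 0, 0)  ->  1577880000000
    #
    # i.e., the 10-digit POSIX timestamp (1577880000 for that UTC date) with 3 millisecond
    # digits appended and the decimal point omitted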

    # Done with the validations. Start building the service endpoint then.
    service_endpoint = "/api/plugins/telemetry/" + str(
        device_cred[0]) + "/" + str(device_cred[1]) + "/values/timeseries?"

    url_elements = []

    if interval:
        url_elements.append("interval=" + str(interval))

    url_elements.append("limit=" + str(limit))

    if agg:
        url_elements.append("agg=" + str(agg))

    # The element in device_cred[2] can be a list containing multiple timeseries keys (if the device in question is a multisensor one). If a timeseries filter was provided, it is now time to apply it to reduce the number of variable types returned
    if timeseries_keys_filter:
        # Grab the original keys list into a single variable
        device_ts_keys_list = device_cred[2]
        valid_keys = []
        for timeseries_key in timeseries_keys_filter:
            # And now check if any of the elements passed in the filter list is in the initial list
            if timeseries_key in device_ts_keys_list:
                # Add it to the valid keys list if so
                valid_keys.append(timeseries_key)
            # Otherwise warn the user of the mismatch
            else:
                timeseries_log.warning(
                    "The filter key '{0}' provided in the filter list is not a valid timeseries key. Ignoring it..."
                    .format(str(timeseries_key)))

        # If the last loop didn't yield any valid results, warn the user and default to the original string list
        if not len(valid_keys):
            timeseries_log.warning(
                "Unable to apply timeseries key filter: none of the provided keys had a match. Defaulting to {0}..."
                .format(str(device_ts_keys_list)))
            valid_keys = device_ts_keys_list
        else:
            # And inform the user of the alteration
            timeseries_log.info(
                "Valid filter found. Running remote API query with keys: {0}".
                format(str(valid_keys)))

        url_elements.append("keys=" + ",".join(valid_keys))

    else:
        # No filters required. Append the full timeseries elements then
        url_elements.append("keys=" + ",".join(device_cred[2]))

    url_elements.append("startTs=" + str(start_ts))
    url_elements.append("endTs=" + str(end_ts))

    # Done. Now mash up the whole thing into a '&' separated string
    service_endpoint += "&".join(url_elements)

    # I'm finally ready to query the remote endpoint. This service requires a REGULAR type authorization token
    service_dict = utils.build_service_calling_info(
        mac.get_auth_token(user_type='tenant_admin'), service_endpoint)

    try:
        response = requests.get(url=service_dict['url'],
                                headers=service_dict['headers'])
    except (requests.exceptions.ConnectionError,
            requests.exceptions.ConnectTimeout):
        error_msg = "Unable to establish a connection with {0}...".format(
            str(service_dict['url']))
        timeseries_log.error(error_msg)
        raise utils.ServiceEndpointException(message=error_msg)

    # Check first if the response came back with a HTTP 200
    if response.status_code != 200:
        error_msg = "Request unsuccessful: Received HTTP {0} with message: {1}".format(
            str(response.status_code), str(eval(response.text)['message']))
        timeseries_log.error(error_msg)
        raise utils.ServiceEndpointException(message=error_msg)
    else:
        # The results are going to be returned as a dictionary of dictionaries in the following format:
        # result_dict = {
        #               "timeseries_key_1": [
        #                   {'ts': int, 'value': str},
        #                   {'ts': int, 'value': str},
        #                   ...
        #                   {'ts': int, 'value': str}
        #               ],
        #               "timeseries_key_2": [
        #                   {'ts': int, 'value': str},
        #                   {'ts': int, 'value': str},
        #                   ...
        #                   {'ts': int, 'value': str}
        #               ],
        #               ...
        #               "timeseries_key_N": [
        #                   {'ts': int, 'value': str},
        #                   {'ts': int, 'value': str},
        #                   ...
        #                   {'ts': int, 'value': str}
        #               ]
        # }
        # Use this as a reference for when another method needs to consume data from this response. It's an over-complicated structure, honestly, and it's not hard to create a simple method to call after this to simplify it greatly. But there's no
        # point in doing that until we know exactly what format needs to be returned.

        # Apply the built-in 'eval' just to transform the returned str into a dict
        result_dict = eval(response.text)

        # Finally, check if any of the entries in the returned dictionary matches the 'limit' parameter and warn the user of potential missing results if so
        for result_key in list(result_dict.keys()):
            if len(result_dict[result_key]) == limit:
                timeseries_log.warning(
                    "Timeseries key '{0}' results were limited by the 'limit' parameter: got {1} valid results back"
                    .format(str(result_key), str(limit)))

        # Return the result dictionary finally
        return result_dict
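
# Hedged usage sketch: the device name and the 12-hour window are illustrative assumptions,
# not values from the original source.
#
#     import datetime
#
#     end = datetime.datetime.now().replace(microsecond=0)
#     device_data = getTimeseries(device_name='Water Meter A-1',
#                                 end_date=end,
#                                 time_interval=12 * 3600,  # the last 12 hours
#                                 limit=500)
#     for ts_key, samples in device_data.items():
#         print("{0}: {1} samples".format(ts_key, len(samples)))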
# Code example #5

def get_auth_token(user_type):
    """This is going to be the go-to method in this module. This method receives one of the supported user types ('SYS_ADMIN', 'TENANT_ADMIN' or 'CUSTOMER_USER') and fetches the respective authorization token. What this method does to get it is
    abstracted from the user. This method automatically checks the usual suspects first: database table. If there's any token in there for the provided user type, it then tests it to see if it is still valid. If not, it then tries to use the
    refresh token to issue a valid one and, if that is also not possible, request a new pair of authentication and refresh tokens.
    This method should be integrated into basic service calls to save the user to deal with the whole authorization token logistics
    @:param user_type (str) - One of the following supported user types: sys_admin, tenant_admin, customer_user (the case type of this argument is irrelevant because I will take care of it later on)
    @:raise utils.InputValidationException - If an invalid argument is provided
    @:raise utils.AuthenticationException - If the authentication credentials are not correct
    @:raise utils.ServiceEndpointException - If the call to the remote service fails
    @:raise mysql_utils.MySQLDatabaseException - If problems arise when dealing with the database
    @:return token (str) - A valid authorization token that can be used to authenticate a remote service call"""

    auth_token_log = ambi_logger.get_logger(__name__)

    # Validate the input as a data type and as one of the expected user types
    utils.validate_input_type(user_type, str)

    # Set the user type string to all lower case characters to simplify comparisons from this point on
    user_type = user_type.lower()
    supported_user_types = ['sys_admin', 'tenant_admin', 'customer_user']

    if user_type not in supported_user_types:
        raise utils.InputValidationException(
            "Invalid user type provided: '{0}'. Please provided one of these: {1}"
            .format(str(user_type), str(supported_user_types)))

    # All seems good so far. Let's check the database first
    database_name = user_config.mysql_db_access['database']
    table_name = proj_config.mysql_db_tables['authentication']

    cnx = mysql_utils.connect_db(database_name=database_name)
    select_cursor = cnx.cursor(buffered=True)
    change_cursor = cnx.cursor(buffered=True)

    # Grab the full column list from the database for indexing purposes
    column_list = mysql_utils.get_table_columns(database_name=database_name,
                                                table_name=table_name)

    # Let's see if there's any token already in the database
    sql_select = """SELECT token, refreshToken FROM """ + str(
        table_name) + """ WHERE user_type = %s;"""

    select_cursor = mysql_utils.run_sql_statement(select_cursor, sql_select,
                                                  (user_type, ))

    # Check if any results came back
    if select_cursor.rowcount > 0:
        # I got a valid token pair back. Extract the authorization from it
        auth_token = select_cursor.fetchone()[0]

        # And call the getUser service providing the retrieved token to see if a) the token is still valid and b) the user_type provided matches what the remote API sends back
        token_status_response = tb_auth_controller.getUser(
            auth_token=auth_token)

        # And convert the response body into the expected dictionary for easier access after this point.
        # NOTE: Interesting development in this case: it turns out that I can authorize a user using an authorization token that was issued from a different ThingsBoard installation!!! In other words, I can get an authorization token issued from my
        # local ThingsBoard installation and use it to get a "valid" authentication in the remote ThingsBoard installation. When I say "valid" I mean the interface accepts the token without any kind of feedback regarding its actual validity. Yet,
        # when I execute a service with it, guess what? I do get a HTTP 200 response but without any data!! This doesn't make any sense and is going to complicate my code a lot! So I need to deal with these annoying cases too...
        # Attempt to do the following only if something was returned back in the text parameter of the response
        token_status_dict = None

        if token_status_response.text != "":
            token_status_dict = eval(
                utils.translate_postgres_to_python(token_status_response.text))

        # This is the particularly annoying case in which a valid authorization token from a different installation is used. In this case, the installation accepts the token, since it has the expected format, but internally it gets rejected
        # because the credential pair that originated it obviously doesn't match! But somehow the API fails to mention this! Instead, the damn thing accepts the token and even returns HTTP 200 responses to my requests, but these come back all
        # empty, presumably because the internal authentication failed... because the tokens are wrong. Gee, what an unnecessary mess... If a case such as that is detected, simply get a new pair of tokens. Most of these cases are solved by
        # forcing a token refresh
        if token_status_response.status_code != 200 or token_status_response.text == "":
            # Check the most usual case for a non-HTTP 200 return: HTTP 401 with sub-errorCode 11 (it's embedded in the response text) - the authorization token has expired
            if token_status_response.status_code == 401 and eval(
                    token_status_response.text)['errorCode'] == 11:
                # Inform the user first
                auth_token_log.warning(
                    "The authorization token for user type = {0} retrieved from {1}.{2} is expired. Requesting new one..."
                    .format(str(user_type), str(database_name),
                            str(table_name)))
            elif token_status_response.text == "":
                auth_token_log.warning(
                    "The authorization provided was issued from a different ThingsBoard installation than this one! Need to issued a new pair..."
                )

            # Use the refresh token to retrieve a new authorization dictionary into the proper variable. No need to provide the refresh token: the tb_auth_controller.refresh_session_token method already takes care of retrieving it from the
            # database. Create a dictionary to call this method by setting all user_types to False except the one that I want
            new_auth_dict = {
                'sys_admin': False,
                'tenant_admin': False,
                'customer_user': False,
                user_type: True
            }

            # If I caught that annoying case in which a valid authorization token from a different ThingsBoard installation was used
            if token_status_response.text == "":
                new_auth_dict = tb_auth_controller.get_session_tokens(
                    sys_admin=new_auth_dict['sys_admin'],
                    tenant_admin=new_auth_dict['tenant_admin'],
                    customer_user=new_auth_dict['customer_user'])
                auth_token_log.info(
                    "Got a new pair of authorization tokens for the {0} ThingsBoard installation."
                    .format(str(user_config.access_info['host'])))
            # Otherwise, it's an expired token case. Deal with it properly then
            # NOTE: The call to the refresh_session_token method already verifies and deals with expired refreshTokens too.
            else:
                new_auth_dict = tb_auth_controller.refresh_session_token(
                    sys_admin=new_auth_dict['sys_admin'],
                    tenant_admin=new_auth_dict['tenant_admin'],
                    customer_user=new_auth_dict['customer_user'])
                auth_token_log.info(
                    "Refreshed the authorization tokens for the {0} ThingsBoard installation."
                    .format(str(user_config.access_info['host'])))

            # From this point on, the process is the same for both cases considered above

            # If I got to this point, then my new_auth_dict has a fresh pair of authorization and refresh tokens under the user_type key entry (the previous call raises an Exception otherwise)
            # In this case, I have the tokens in the database expired. Update these entries before returning the valid authorization token
            sql_update = mysql_utils.create_update_sql_statement(
                column_list=column_list,
                table_name=table_name,
                trigger_column_list=['user_type'])
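
            # Presumably (an assumption about mysql_utils.create_update_sql_statement, not
            # confirmed by the original source) the generated statement has the form:
            #
            #     UPDATE <table> SET user_type = %s, token = %s, token_timestamp = %s,
            #         refreshToken = %s, refreshToken_timestamp = %s WHERE user_type = %s;
            #
            # which matches the order of the data tuple built right below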

            # Prepare the data tuple for the UPDATE operation respecting the expected order: user_type, token, token_timestamp, refreshToken, refreshToken_timestamp and user_type again (because of the WHERE clause in the UPDATE)
            update_data_tuple = (user_type, new_auth_dict[user_type]['token'],
                                 datetime.datetime.now().replace(
                                     microsecond=0),
                                 new_auth_dict[user_type]['refreshToken'],
                                 datetime.datetime.now().replace(
                                     microsecond=0), user_type)

            # Execute the statement
            change_cursor = mysql_utils.run_sql_statement(
                change_cursor, sql_update, update_data_tuple)

            # And check the execution results
            if not change_cursor.rowcount:
                error_msg = "Could not update {0}.{1} with '{2}' statement...".format(
                    str(database_name), str(table_name),
                    str(change_cursor.statement))
                auth_token_log.error(error_msg)
                change_cursor.close()
                select_cursor.close()
                cnx.close()
                raise mysql_utils.MySQLDatabaseException(message=error_msg)
            else:
                auth_token_log.info(
                    "Token database information for user_type = '{0}' updated successfully in {1}.{2}!"
                    .format(str(user_type), str(database_name),
                            str(table_name)))
                cnx.commit()
                # Close the database access objects and return the valid token then
                change_cursor.close()
                select_cursor.close()
                cnx.close()
                return new_auth_dict[user_type]['token']
        # Check if the user type returned in the response (it would be under the 'authority' key in the response dictionary) matches the user_type provided (it would be quite weird if it didn't, but check it anyways)
        elif token_status_dict is not None and token_status_dict[
                'authority'].lower() != user_type:
            auth_token_log.warning(
                "Attention: the authorization token retrieved from {0}.{1} for user type '{2}' provided is actually associated with a '{3}' user type! Resetting..."
                .format(str(database_name), str(table_name), str(user_type),
                        str(token_status_dict['authority'])))
            # Mismatch detected. Assuming that the ThingsBoard API only accepts user types from the set defined and since I've validated the user type provided as argument also, this means that my mismatch is at the MySQL database level,
            # that somehow has a valid authentication token submitted under a valid user type, just not the correct one
            # First, update the user_type in the database for the correct one (the one retrieved from the remote API)
            remote_user_type = token_status_dict['authority']
            # Request an UPDATE SQL template to replace the current user type by the correct remote_user_type
            sql_update = mysql_utils.create_update_sql_statement(
                column_list=['user_type'],
                table_name=table_name,
                trigger_column_list=['user_type'])
            data_tuple = (remote_user_type, user_type)

            # Execute the statement
            change_cursor = mysql_utils.run_sql_statement(
                change_cursor, sql_update, data_tuple)

            # Check if something was done
            if not change_cursor.rowcount:
                error_msg = "Update operation '{0}' in {1}.{2} not successful!".format(
                    str(change_cursor.statement), str(database_name),
                    str(table_name))
                auth_token_log.error(error_msg)
                change_cursor.close()
                select_cursor.close()
                cnx.close()
                raise mysql_utils.MySQLDatabaseException(message=error_msg)
            else:
                # Commit the changes, warn the user, request a new authentication token for the original user_type requested, save it in the database (in a new entry given that the last one was changed) and return the valid authorization token
                # back, which should always be what this method does before exiting (either this or raise an Exception)
                cnx.commit()

                auth_token_log.warning(
                    "Successfully updated user_type = {0} entry to {1} in {2}.{3}. Requesting new authorization token to {0}..."
                    .format(str(user_type), str(remote_user_type),
                            str(database_name), str(table_name)))

                # Set out the flags for the new session token request, setting all user_types to False at first but then switching on to True only the one matching the provided user_type
                new_auth_dict = {
                    'sys_admin': False,
                    'tenant_admin': False,
                    'customer_user': False,
                    user_type: True
                }

                # And now I can request a new session token for only the user_type that I need, without having to spell out a different call signature for each possible case. Clever!
                new_auth_dict = tb_auth_controller.get_session_tokens(
                    sys_admin=new_auth_dict['sys_admin'],
                    tenant_admin=new_auth_dict['tenant_admin'],
                    customer_user=new_auth_dict['customer_user'])

                # If I got here, it means that I have a new authorization dictionary with all entries set to None except the one corresponding to the requested user_type. Update the database and return the token back to the user. Since the
                # new_auth_dict is properly filled, I can now ignore the rest of this if-else jungle. The fact that new_auth_dict is not None anymore is going to trigger an INSERT operation with its data into the database
                pass

        # The HTTP status code is a nice 200 OK. Nothing to do but to return the valid token
        else:
            auth_token_log.info(
                "Got a still valid authorization token for user type {0} from {1}.{2}."
                .format(str(user_type), str(database_name), str(table_name)))
            # Close the database structures before returning the token
            select_cursor.close()
            change_cursor.close()
            cnx.close()
            return auth_token

    else:
        # If I get to this point it means that no valid authorization token was found so far in the database. Yet, there is a possibility that some other token request may have been placed in the logic above and now it needs the data retrieved to
        # be sent to the database. I can detect this by looking at the new_auth_dict variable. If it's None, it means that I need to request a new pair of tokens for this user_type.
        # Create a base for the new authorization dictionary by setting all user_types to False initially and then triggering just the one that needs new authorization tokens to True
        new_auth_dict = {
            'sys_admin': False,
            'tenant_admin': False,
            'customer_user': False,
            user_type: True
        }

        # And use this to request a new pair of authorization tokens from the remote API
        new_auth_dict = tb_auth_controller.get_session_tokens(
            sys_admin=new_auth_dict['sys_admin'],
            tenant_admin=new_auth_dict['tenant_admin'],
            customer_user=new_auth_dict['customer_user'])

    # In any case, I should have a new_auth_dict dictionary here with one entry filled in with a valid authorization token. Time to add it to the database
    sql_insert = mysql_utils.create_insert_sql_statement(
        column_list=column_list, table_name=table_name)

    # And create the data tuple by replacing the members in the column_list retrieved before with the corresponding values
    column_list[column_list.index('user_type')] = user_type
    column_list[column_list.index('token')] = new_auth_dict[user_type]['token']
    column_list[column_list.index(
        'token_timestamp')] = datetime.datetime.now().replace(microsecond=0)
    column_list[column_list.index(
        'refreshToken')] = new_auth_dict[user_type]['refreshToken']
    column_list[column_list.index(
        'refreshToken_timestamp')] = datetime.datetime.now().replace(
            microsecond=0)
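    # (For illustration: column_list initially holds the table's column names, i.e. ['user_type', 'token', 'token_timestamp', 'refreshToken', 'refreshToken_timestamp'],
    # and each name is overwritten in place with its value, so tuple(column_list) lines up one-to-one with the INSERT statement's placeholders)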

    # Execute the statement
    change_cursor = mysql_utils.run_sql_statement(change_cursor, sql_insert,
                                                  tuple(column_list))

    if not change_cursor.rowcount:
        error_msg = "Failed to execute '{0}' in {1}.{2}. Exiting...".format(
            str(change_cursor.statement), str(database_name), str(table_name))
        auth_token_log.error(error_msg)
        change_cursor.close()
        select_cursor.close()
        cnx.close()
        raise mysql_utils.MySQLDatabaseException(message=error_msg)
    else:
        cnx.commit()
        auth_token_log.info(
            "Added authorization token from user_type = {0} to {1}.{2} successfully!"
            .format(str(user_type), str(database_name), str(table_name)))
        # Return the token then
        select_cursor.close()
        change_cursor.close()
        cnx.close()
        return new_auth_dict[user_type]['token']
Code example #6
def populate_auth_table():
    """Use this method to request a new set of valid authorization tokens for all defined user types and to write them into the database. By default, this method starts by cleaning out any records currently in the authorization table in the
    database since this method is supposed to be called at the start of a new session
    @:raise mysql_utils.MySQLDatabaseException - If there are problems with the access to the database
    @:raise utils.AuthenticationException - If the access credentials in the user_config file are not valid
    @:raise utils.ServiceEndpointException - For problems with the remote API access"""
    populate_auth_log = ambi_logger.get_logger(__name__)

    # Start by cleaning out the authorization table
    database_name = user_config.mysql_db_access['database']
    table_name = proj_config.mysql_db_tables['authentication']
    cnx = mysql_utils.connect_db(database_name=database_name)
    column_list = mysql_utils.get_table_columns(database_name=database_name,
                                                table_name=table_name)
    change_cursor = cnx.cursor(buffered=True)

    populate_auth_log.info("Cleaning out {0}.{1}...".format(
        str(database_name), str(table_name)))

    # Prepare the DELETE statement by deleting all records older than a datetime argument
    sql_delete = """DELETE FROM """ + str(
        table_name) + """ WHERE token_timestamp < %s;"""

    # And execute it providing the current datetime as argument, thus effectively deleting all records in the database (unless some have future timestamps, which makes no sense whatsoever)
    change_cursor = mysql_utils.run_sql_statement(
        change_cursor, sql_delete,
        (datetime.datetime.now().replace(microsecond=0), ))

    if not change_cursor.rowcount:
        populate_auth_log.info(
            "{0}.{1} was already empty. Populating it now...".format(
                str(database_name), str(table_name)))
    else:
        populate_auth_log.info(
            "Deleted {0} records from {1}.{2}. Populating new records now...".
            format(str(change_cursor.rowcount), str(database_name),
                   str(table_name)))

    cnx.commit()

    # Grab a new authorization dictionary
    auth_dict = tb_auth_controller.get_session_tokens(sys_admin=True,
                                                      tenant_admin=True,
                                                      customer_user=True)
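    # (auth_dict maps each user_type key ('sys_admin', 'tenant_admin', 'customer_user') to a
    # {'token': ..., 'refreshToken': ...} sub-dictionary, which is what the data_tuple construction below relies on)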

    # And populate the database accordingly
    for user_type in auth_dict:
        sql_insert = mysql_utils.create_insert_sql_statement(
            column_list=column_list, table_name=table_name)

        # Add the values to insert in the order in which they are expected, namely:
        # user_type, token, token_timestamp, refreshToken and refreshToken_timestamp
        data_tuple = (user_type, auth_dict[user_type]['token'],
                      datetime.datetime.now().replace(microsecond=0),
                      auth_dict[user_type]['refreshToken'],
                      datetime.datetime.now().replace(microsecond=0))

        # And execute the INSERT
        change_cursor = mysql_utils.run_sql_statement(change_cursor,
                                                      sql_insert, data_tuple)

        # Check if the execution was successful
        if not change_cursor.rowcount:
            error_msg = "Unable to execute '{0}' in {1}.{2}. Exiting...".format(
                str(change_cursor.statement), str(database_name),
                str(table_name))
            populate_auth_log.error(error_msg)
            change_cursor.close()
            cnx.close()
            raise mysql_utils.MySQLDatabaseException(message=error_msg)
        else:
            cnx.commit()
            populate_auth_log.info(
                "Added a pair of authorization tokens for {0} in {1}.{2} successfully!"
                .format(str(user_type), str(database_name), str(table_name)))

    # Done. Inform the user and exit
    populate_auth_log.info("{0}.{1} populated successfully!".format(
        str(database_name), str(table_name)))
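# Usage sketch: per the docstring, this method is meant to run once at the start of a new
# session, before anything tries to read tokens from the authentication table:
#
#   populate_auth_table()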
Code example #7
def get_asset_env_data(start_date,
                       end_date,
                       variable_list,
                       asset_name=None,
                       asset_id=None,
                       filter_nones=True):
    """
    Use this method to retrieve the environmental data between the two dates specified by the start_date/end_date pair, for each of the variables indicated in the variable list and for an asset identified by at least one of the elements in
    the asset_name/asset_id pair.
    :param start_date: (datetime.datetime) A datetime.datetime object denoting the beginning (the oldest date element) of the time window for data retrieval.
    :param end_date: (datetime.datetime) A datetime.datetime object denoting the end (the newest date element) of the time window for data retrieval.
    :param variable_list: (list of str) A list with the variable names (ontology names) that are to be retrieved from the respective database table. Each one of the elements in the list provided is going to be validated against the 'official'
    list in proj_config.ontology_names.
    :param asset_name: (str) The name of the asset entity from which the data is to be retrieved. This method expects either this parameter or the respective id to be provided and does not execute unless at least one of them is present.
    :param asset_id: (str) The id string associated to an asset element in the database, i.e., a string of 32 hexadecimal digits in the usual 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx' format. This method expects either this element or the asset
    name to be provided before continuing. If neither is present, the respective Exception is raised.
    :param filter_nones: (bool) Set this flag to True to exclude any None values from the final result dictionary. Otherwise the method returns all values, including NULL/None ones.
    :raise utils.InputValidationException: If any of the inputs fails the initial data type validation or if none of the asset identifiers (name or id) are provided.
    :raise mysql_utils.MySQLDatabaseException: If any errors occur during the database accesses.
    :return response (dict): This method returns a response dictionary in a format that is expected to be serialized and returned as a REST API response further on. For this method, the response dictionary has the following format:
        response =
            {
                env_variable_1: [
                    {
                        timestamp_1: <str>,
                        value_1: <str>
                    },
                    {
                        timestamp_2: <str>,
                        value_2: <str>
                    },
                    ...,
                    {
                        timestamp_N: <str>,
                        value_N: <str>
                    }
                ],
                env_variable_2: [
                    {
                        timestamp_1: <str>,
                        value_1: <str>
                    },
                    {
                        timestamp_2: <str>,
                        value_2: <str>
                    },
                    ...,
                    {
                        timestamp_N: <str>,
                        value_N: <str>
                    }
                ],
                ...
                env_variable_N: [
                    {
                        timestamp_1: <str>,
                        value_1: <str>
                    },
                    {
                        timestamp_2: <str>,
                        value_2: <str>
                    },
                    ...,
                    {
                        timestamp_N: <str>,
                        value_N: <str>
                    }
                ]
            }
    """
    log = ambi_logger.get_logger(__name__)

    # Validate inputs

    # Check if at least one element from the pair asset_name/asset_id was provided
    if not asset_name and not asset_id:
        error_msg = "Missing both asset name and asset id from the input parameters. Cannot continue until at least one of these is provided."
        log.error(error_msg)
        raise utils.InputValidationException(message=error_msg)

    if asset_name:
        utils.validate_input_type(asset_name, str)

    if asset_id:
        utils.validate_id(entity_id=asset_id)

    utils.validate_input_type(start_date, datetime.datetime)
    utils.validate_input_type(end_date, datetime.datetime)

    if start_date > datetime.datetime.now():
        error_msg = "The start date provided: {0} is invalid! Please provide a past date to continue...".format(
            str(start_date))
        log.error(error_msg)
        raise utils.InputValidationException(message=error_msg)

    if end_date > datetime.datetime.now():
        error_msg = "The end date provided: {0} is invalid! Please provide a past or current date to continue....".format(
            str(end_date))
        log.error(error_msg)
        raise utils.InputValidationException(message=error_msg)

    if start_date >= end_date:
        error_msg = "Invalid time window for data retrieval provided: {0} -> {1}. Cannot continue until a start_date < end_date is provided!".format(
            str(start_date), str(end_date))
        log.error(msg=error_msg)
        raise utils.InputValidationException(message=error_msg)

    utils.validate_input_type(variable_list, list)
    validated_variable_list = []
    for variable in variable_list:
        # Check if the element is indeed a str as expected
        utils.validate_input_type(variable, str)
        # Take the chance to normalize it to all lowercase characters
        variable = variable.lower()

        # And check if it is indeed a valid element; invalid names are logged and left out of the validated list (removing items from a list while iterating over it would skip elements)
        if variable not in proj_config.ontology_names:
            log.warning(
                "Attention: the environmental variable name provided: {0} is not among the ones supported:\n{1}\nRemoving it from the variable list..."
                .format(str(variable), str(proj_config.ontology_names)))
        else:
            validated_variable_list.append(variable)

    # Replace the original list with the validated one
    variable_list = validated_variable_list

    # Check that the last operation didn't empty the whole environmental variable list
    if len(variable_list) == 0:
        error_msg = "The variable list is empty! Cannot continue until at least one valid environmental variable is provided"
        log.error(error_msg)
        raise utils.InputValidationException(message=error_msg)

    # The asset_id is one of the most important parameters in this case. Use the provided arguments to either obtain it or make sure the one provided is a valid one. If both parameters were provided (asset_id and asset_name), the id takes precedence and the name is cross-checked against the database
    if asset_id:
        # If an asset id was provided, use it to obtain the associated name
        asset_name_db = retrieve_asset_name(asset_id=asset_id)

        # Check if any names were obtained above and, if so, check if it matches any asset name also provided
        if asset_name:
            if asset_name != asset_name_db:
                log.warning(
                    "The asset name obtained from {0}.{1}: {2} does not matches the one provided: {3}. Defaulting to {2}..."
                    .format(
                        str(user_config.access_info['mysql_database']
                            ['database']),
                        str(proj_config.mysql_db_tables['tenant_assets']),
                        str(asset_name_db), str(asset_name)))
                asset_name = asset_name_db

    if not asset_id and asset_name:
        # Another case: only the asset name was provided but no associated id. Use the respective method to retrieve the asset id from the name
        asset_id = retrieve_asset_id(asset_name=asset_name)

        # Check if a valid id was indeed returned (not None)
        if not asset_id:
            error_msg = "Invalid asset id returned from {0}.{1} using asset_name = {2}. Cannot continue...".format(
                str(user_config.access_info['mysql_database']['database']),
                str(proj_config.mysql_db_tables['tenant_assets']),
                str(asset_name))
            log.error(msg=error_msg)
            raise utils.InputValidationException(message=error_msg)

    utils.validate_input_type(filter_nones, bool)

    # Initial input validation cleared. Before moving any further, implement the database access objects and use them to retrieve a unique, single name/id pair for the asset in question
    database_name = user_config.access_info['mysql_database']['database']
    asset_device_table_name = proj_config.mysql_db_tables['asset_devices']
    device_data_table_name = proj_config.mysql_db_tables['device_data']

    cnx = mysql_utils.connect_db(database_name=database_name)
    select_cursor = cnx.cursor(buffered=True)

    # All ready for data retrieval. First, retrieve the device_id for every device associated with the given asset
    sql_select = """SELECT toId, toName FROM """ + str(
        asset_device_table_name
    ) + """ WHERE fromEntityType = %s AND fromId = %s AND toEntityType = %s;"""

    select_cursor = mysql_utils.run_sql_statement(cursor=select_cursor,
                                                  sql_statement=sql_select,
                                                  data_tuple=('ASSET',
                                                              asset_id,
                                                              'DEVICE'))

    # Analyse the execution results
    if select_cursor.rowcount == 0:
        error_msg = "Asset (asset_name = {0}, asset_id = {1}) has no devices associated to it! Cannot continue...".format(
            str(asset_name), str(asset_id))
        log.error(msg=error_msg)
        select_cursor.close()
        cnx.close()
        raise mysql_utils.MySQLDatabaseException(message=error_msg)
    else:
        log.info(
            "Asset (asset_name = {0}, asset_id = {1}) has {2} devices associated."
            .format(str(asset_name), str(asset_id),
                    str(select_cursor.rowcount)))

    # Extract the device ids to a list for easier iteration later on
    record = select_cursor.fetchone()
    device_id_list = []

    while record:
        device_id_list.append(record[0])

        # Grab another one
        record = select_cursor.fetchone()

    # Prepare a 'deviceId = %s' placeholder for each device id retrieved so far; these will be joined with ORs to form the WHERE clause of the SQL SELECT statement executed later on
    device_id_string = []

    for _ in device_id_list:
        device_id_string.append("deviceId = %s")

    # And now connect them all into a single string stitched together with 'OR's
    device_where_str = """ OR """.join(device_id_string)
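    # (e.g. for three device ids this yields: "deviceId = %s OR deviceId = %s OR deviceId = %s")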

    # Store the full results in this dictionary
    result_dict = {}

    # Prepare and run one SQL SELECT per environmental variable to retrieve its data
    for i in range(0, len(variable_list)):
        sql_select = """SELECT timestamp, value FROM """ + str(
            device_data_table_name) + """ WHERE ontologyId = %s AND (""" + str(
                device_where_str
            ) + """) AND (timestamp >= %s AND timestamp <= %s);"""

        # Prepare the data tuple by joining together the current ontologyId with all the deviceIds retrieved from before
        data_tuple = tuple([variable_list[i]] + device_id_list + [start_date] +
                           [end_date])

        select_cursor = mysql_utils.run_sql_statement(cursor=select_cursor,
                                                      sql_statement=sql_select,
                                                      data_tuple=data_tuple)

        # Analyse the execution outcome
        if select_cursor.rowcount > 0:
            # Results came back for this particular ontologyId. For this method, the information of the device that made the measurement is irrelevant. Create a dictionary entry for the ontologyId parameter and populate the list of dictionaries
            # with the data retrieved
            result_dict[variable_list[i]] = []

            # Process the database records
            record = select_cursor.fetchone()

            while record:
                # Check if the filtering flag is set
                if filter_nones:
                    # And if so, check if the current record has a None as its value
                    if record[1] is None:
                        # If so, grab the next record and skip the rest of this cycle
                        record = select_cursor.fetchone()
                        continue

                result_dict[variable_list[i]].append({
                    "timestamp": str(int(record[0].timestamp())),
                    "value": str(record[1])
                })
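                # (record[0] is a datetime; .timestamp() converts it to Unix epoch seconds,
                # returned here as a string to keep all response values uniform)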

                # Grab the next record in line
                record = select_cursor.fetchone()

    # All done it seems. Close down the database access elements and return the results so far
    select_cursor.close()
    cnx.close()
    return result_dict
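# Usage sketch (hypothetical values; assumes 'temperature' and 'humidity' are among
# proj_config.ontology_names and that an asset named 'Room 1.01' exists):
#
#   data = get_asset_env_data(
#       start_date=datetime.datetime(2020, 1, 1, 0, 0, 0),
#       end_date=datetime.datetime(2020, 1, 2, 0, 0, 0),
#       variable_list=['temperature', 'humidity'],
#       asset_name='Room 1.01')
#   # data['temperature'] -> [{'timestamp': '...', 'value': '...'}, ...]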
Code example #8
def refresh_session_token(sys_admin=False,
                          tenant_admin=False,
                          customer_user=False):
    """This method is analogous to the get_session_token one but using the refreshToken, that is assumed to be in the respective database already, to get a valid authorization token without needing to provide the access credentials again,
    thus a more secure way to keep sessions active. This requires at least one of the input argument flags to be set to function. All three currently supported user_types can be refreshed by one call to this method, as long as the flags are set.
    The idea here being that, when requested, the remote API returns a pair of authentication token/refresh token in which the authentication token as a shorter validity period than the refresh token. But once the authentication token get expired,
    if a call to this method is placed before the refresh token also expires (which also happened, though later than the first one) allows to reset the whole thing, since using the refresh token routine results in a new, fresh pair with both
    expiration periods reset
    @:param sys_admin (bool) - Flag to set a refresh on the tokens for the SYS_ADMIN
    @:param tenant_admin (bool) - Flag to set a refresh on the tokens for the TENANT_ADMIN
    @:param customer_user (bool) - Flag to set a refresh on the tokens for the CUSTOMER_USER
    @:raise utils.AuthenticationException - If the access credentials cannot be used to retrieve authentication data
    @:raise utils.ServiceCallException - If errors happen when accessing the remote service
    @:raise utils.InputValidationException - If an invalid argument is provided (data type wise)
    @:raise mysql_utils.MySQLDatabaseException - If errors happen with the database access or with the integrity of the data in the database
    @:return auth_dict (dict) - An authentication dictionary in the same format used so far:
        auth_dict = {
            'sys_admin': {
                'token': str,
                'refreshToken': str
            },
            'tenant_admin': {
                'token': str,
                'refreshToken': str
            },
            'customer_user': {
                'token': str,
                'refreshToken': str
            }
        }
    As before, omitted user types have their sub dictionary set to None. This method only returns this structure back. It's up to the calling method to update the database table, to keep things as decoupled as possible at this point
    """
    refresh_token_log = ambi_logger.get_logger(__name__)

    # Validate whatever argument flags were provided, and then check that at least one of them was set
    if sys_admin:
        utils.validate_input_type(sys_admin, bool)

    if tenant_admin:
        utils.validate_input_type(tenant_admin, bool)

    if customer_user:
        utils.validate_input_type(customer_user, bool)

    # If all argument flags are False
    if not (sys_admin or tenant_admin or customer_user):
        error_msg = "No user types set. Please set one of the argument flags when calling this method..."
        refresh_token_log.error(error_msg)
        raise utils.InputValidationException(message=error_msg)

    # Retrieve the existing tokens from the database into the typical authentication dictionary
    auth_dict = {
        'sys_admin': None,
        'tenant_admin': None,
        'customer_user': None
    }

    # Add the relevant user type strings to a list for iterating
    user_type_list = []
    if sys_admin:
        user_type_list.append('sys_admin')

    if tenant_admin:
        user_type_list.append('tenant_admin')

    if customer_user:
        user_type_list.append('customer_user')

    # And grab the results from the database
    database_name = user_config.mysql_db_access['database']
    table_name = proj_config.mysql_db_tables['authentication']

    cnx = mysql_utils.connect_db(database_name=database_name)
    select_cursor = cnx.cursor(buffered=True)
    select_column = 'user_type'

    sql_select = """SELECT * FROM """ + str(table_name) + """ WHERE """

    data_tuple = tuple(user_type_list)
    where_list = []

    for i in range(0, len(user_type_list)):
        where_list.append(select_column + " = %s")

    sql_select += """ OR """.join(where_list) + """;"""
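    # (e.g. with sys_admin and tenant_admin requested this builds:
    #   SELECT * FROM <table> WHERE user_type = %s OR user_type = %s;
    # with the placeholders bound to the data_tuple values at execution time)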

    # Execute the statement then
    select_cursor = mysql_utils.run_sql_statement(select_cursor, sql_select,
                                                  data_tuple)

    # Check if any results were returned
    if not select_cursor.rowcount:
        error_msg = "The statement '{0}' didn't return any results from {1}.{2}.".format(
            str(select_cursor.statement), str(database_name), str(table_name))
        refresh_token_log.error(error_msg)
        select_cursor.close()
        cnx.close()
        raise mysql_utils.MySQLDatabaseException(message=error_msg)
    # Move on if you got any results back
    else:
        # Get the list of column names from the authentication table to use as a reference to obtain the data I'm looking for
        column_list = mysql_utils.get_table_columns(
            database_name=database_name, table_name=table_name)

        # Create the base endpoint. To get a new pair of authorization tokens from an expired authorization token and a still valid refresh token, provide both tokens (with the refreshToken sent as the data payload) to the '/token' endpoint
        service_endpoint = '/api/auth/token'

        # Now I can pick up a record at a time, refresh the authentication token and update the return dictionary with the reply
        result = select_cursor.fetchone()

        # Do the following as long as there is a non-None element returned from the database cursor
        while result:
            # Start by getting the standard service call structures
            con_dict = utils.build_service_calling_info(
                auth_token=result[column_list.index('token')],
                service_endpoint=service_endpoint)

            # Build the additional data payload structure which is needed for this service call in particular
            data = '{"refreshToken": "' + str(
                result[column_list.index('refreshToken')]) + '"}'

            # And call the remote API with the refresh request
            try:
                api_response = requests.post(url=con_dict['url'],
                                             headers=con_dict['headers'],
                                             data=data)
            except (requests.exceptions.ConnectionError,
                    requests.exceptions.ConnectTimeout):
                error_msg = "Unable to establish a connection with {0}:{1}. Exiting...".format(
                    str(user_config.thingsboard_host),
                    str(user_config.thingsboard_port))
                refresh_token_log.error(error_msg)
                select_cursor.close()
                cnx.close()
                raise utils.ServiceEndpointException(message=error_msg)

            # If a non-HTTP 200 status code was returned, it's probably a credential issue. Raise an exception with the proper information in it
            if api_response.status_code != 200:
                # Check first if the status code is HTTP 401 and if the sub-errorCode is 11, which means that the refresh token for this user_type is also expired. In this case I can always request a new pair instead of raising an Exception
                if api_response.status_code == 401 and eval(
                        api_response.text)['errorCode'] == 11:
                    user_type = result[column_list.index('user_type')]
                    refresh_token_log.warning(
                        "The refresh token for user_type '{0}' is also expired. Requesting a new pair..."
                        .format(str(user_type)))

                    # Request a new pair of authorization tokens, but only for the user_type whose pair has expired, so that the other user_types' tokens currently in the database don't get invalidated by forcing the issue of a new pair. The
                    # get_session_tokens method either returns a valid pair of authorization tokens or it raises an Exception with the reason why it couldn't, so there's no need to verify the next call's results
                    auth_dict = get_session_tokens(
                        sys_admin=(user_type == 'sys_admin'),
                        tenant_admin=(user_type == 'tenant_admin'),
                        customer_user=(user_type == 'customer_user'))

                    return auth_dict

                # If the error was something other than an HTTP 401 with a sub-errorCode of 11, raise an Exception with the error details
                else:
                    refresh_token_log.error(api_response.text)
                    select_cursor.close()
                    cnx.close()
                    raise utils.AuthenticationException(
                        message=api_response.text,
                        error_code=api_response.status_code)

            # Got a pair of valid tokens back. Update the structures then
            else:
                auth_dict[result[column_list.index('user_type')]] = eval(
                    api_response.text)

            # And grab the next result for another iteration of this
            result = select_cursor.fetchone()

        # The while loop is done here and I've processed all results thus far. All that's left to do is return the updated authorization dictionary
        return auth_dict
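# Usage sketch: refresh only the tenant_admin tokens; per the docstring, the caller is then
# responsible for writing the returned pair back to the authentication table:
#
#   auth_dict = refresh_session_token(tenant_admin=True)
#   new_pair = auth_dict['tenant_admin']  # {'token': ..., 'refreshToken': ...}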