def retrieve_asset_name(asset_id):
    """
    This method mirrors retrieve_asset_id: it receives an asset_id and returns the associated name, if any.
    :param asset_id: (str) A hexadecimal string in the expected xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx (UUID) format. This element is configured in the database as the primary key, which means that the query should never return multiple records.
    :raise utils.InputValidationException: If the input argument fails the initial data type validation.
    :raise mysql_utils.MySQLDatabaseException: If any errors occur when consulting the database.
    :return asset_name: (str) If a unique match was found, None otherwise.
    """
    log = ambi_logger.get_logger(__name__)

    # Validate the input
    utils.validate_id(entity_id=asset_id)

    # Prepare the database access elements
    database_name = user_config.access_info['mysql_database']['database']
    table_name = proj_config.mysql_db_tables['tenant_assets']

    cnx = mysql_utils.connect_db(database_name=database_name)
    select_cursor = cnx.cursor(buffered=True)

    sql_select = """SELECT entityType, name FROM """ + str(
        table_name) + """ WHERE id = %s;"""
    select_cursor = mysql_utils.run_sql_statement(cursor=select_cursor,
                                                  sql_statement=sql_select,
                                                  data_tuple=(asset_id, ))

    # Analyse the execution
    if select_cursor.rowcount == 0:
        log.warning(
            "No records returned from {0}.{1} using asset_id = {2}".format(
                str(database_name), str(table_name), str(asset_id)))
        select_cursor.close()
        cnx.close()
        return None
    elif select_cursor.rowcount > 1:
        log.warning(
            "{0}.{1} returned {2} records using asset_id = {3}. Cannot continue..."
            .format(str(database_name), str(table_name),
                    str(select_cursor.rowcount), str(asset_id)))
        select_cursor.close()
        cnx.close()
        return None
    else:
        # A single return came back. Process it then
        record = select_cursor.fetchone()
        if record[0] != 'ASSET':
            error_msg = "The record returned from {0}.{1} using asset id = {2} has a wrong entityType. Got a {3}, expected an 'ASSET'".format(
                str(database_name), str(table_name),
                str(select_cursor.rowcount), str(record[0]))
            log.error(msg=error_msg)
            select_cursor.close()
            cnx.close()
            return None
        else:
            # All is good so far. Check if the name returned is indeed a str and return it if so
            asset_name = record[1]
            utils.validate_input_type(asset_name, str)
            select_cursor.close()
            cnx.close()
            return asset_name
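
# Illustrative usage sketch (not part of the original module): a small helper
# showing how the lookup above is typically consumed, including the None return
# for missing or ambiguous ids. The default UUID below is a placeholder only and
# needs to be replaced by a real asset id from the database.
def _example_print_asset_name(asset_id="11111111-2222-3333-4444-555555555555"):
    asset_name = retrieve_asset_name(asset_id=asset_id)
    if asset_name is None:
        print("No unique asset found for asset_id = {0}".format(asset_id))
    else:
        print("Asset id {0} is named '{1}'".format(asset_id, asset_name))
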
def get_device_credentials(device_name):
    """
    This method simplifies a recurring task: determining the entityType, entityId and timeseriesKeys associated with the device identified by 'device_name'.
    Since most remote services provided by the Thingsboard remote API are fond of using this entityType/entityId pair to uniquely identify every entity configured in the platform (tenants, customers, devices, etc.) but we humans are more inclined to
    rely on names to identify the same entities, this method integrates both approaches for the devices context: it searches the current database entries for the name provided and, if a unique result is obtained, returns its associated entityType,
    entityId and timeseriesKeys.
    @:param device_name (str) - The name of the device, as it should be defined via the 'name' field in the respective Thingsboard installation
    @:raise utils.InputValidationException - If the input fails initial validation
    @:raise mysql_utils.MySQLDatabaseException - If the database access incurs in problems
    @:return None if no single device could be found or if there are no credentials currently configured in the database for this device. Otherwise, the credentials are going to be returned in the following format:
    device_credentials = (entityType (str), entityId (str), timeseriesKeys(list of str))
    """
    log = ambi_logger.get_logger(__name__)

    utils.validate_input_type(device_name, str)

    # Create the typical database access objects
    database_name = user_config.access_info['mysql_database']['database']
    table_name = proj_config.mysql_db_tables['devices']
    cnx = mysql_utils.connect_db(database_name=database_name)
    select_cursor = cnx.cursor(buffered=True)

    # Now for the tricky part: run a SQL SELECT using the device name as the only filter while ensuring that one and only one record is returned. For this I'm going to use the SQL 'LIKE' operator with all four wildcard options in the search
    # field: 'device_name', '%device_name', 'device_name%' and '%device_name%'.
    search_vector = [device_name, '%' + device_name, device_name + '%', '%' + device_name + '%']
    sql_select = """SELECT entityType, id, timeseriesKeys FROM """ + table_name + """ WHERE name LIKE %s;"""

    # Run a SQL SELECT with all elements of the search vector, stopping the loop as soon as a single record gets returned back
    result = None
    for i in range(0, len(search_vector)):
        select_cursor = mysql_utils.run_sql_statement(cursor=select_cursor, sql_statement=sql_select, data_tuple=(search_vector[i],))

        if select_cursor.rowcount != 1:
            log.warning("Searching for name = {0}. Got {1} results back. Expanding search with new wildcards...".format(str(search_vector[i]), str(select_cursor.rowcount)))
            # Ignore the rest of this loop in this case then
            continue
        else:
            log.info("Got an unique record while searching for name = {0}. Moving on".format(str(search_vector[i])))
            # Set the result parameter
            result = select_cursor.fetchone()
            # Get out of this loop in this case
            break

    # Check if a valid result was found. Continue if so, return None otherwise
    if result is None:
        log.warning("Could not retrieve an unique record for device_name = {0} in {1}.{2}. Nothing more to do...".format(str(device_name), str(database_name), str(table_name)))
        return None
    else:
        # Extract the timeseriesKeys string into a list of strings and prepare the return tuple
        # The list of timeseries keys is kept in the database as a single string with all elements separated by commas: timeseriesKeys = "timeseriesKey1,timeseriesKey2,...,timeseriesKeyN"
        timeseriesKeys_list = result[2].split(',')

        # All done. Return the final structure then
        return result[0], result[1], timeseriesKeys_list
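
# Illustrative usage sketch (not part of the original module): shows how the
# (entityType, entityId, timeseriesKeys) tuple returned above is typically
# unpacked. The device name is whatever was set in the ThingsBoard 'name' field.
def _example_print_device_credentials(device_name):
    credentials = get_device_credentials(device_name=device_name)
    if credentials is None:
        print("No unique device found for name = {0}".format(device_name))
        return
    entity_type, entity_id, timeseries_keys = credentials
    print("{0} {1} exposes the timeseries keys: {2}".format(entity_type, entity_id, ", ".join(timeseries_keys)))
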
def update_asset_devices_table():
    """Use this method to fill out the asset devices table that corresponds devices to the assets that are related to them. The idea here is to use ASSETs to represent spaces and the ThingsBoard relation property to associate DEVICEs to those
    assets as a way to represent the devices currently installed and monitoring that space
    @:raise mysql_utils.MySQLDatabaseException - For problems related with the database access
    @:raise utils.ServiceEndpointException - For issues related with the remote API call
    @:raise utils.AuthenticationException - For problems related with the authentication credentials used"""

    asset_devices_log = ambi_logger.get_logger(__name__)

    asset_devices_table_name = proj_config.mysql_db_tables['asset_devices']
    assets_table_name = proj_config.mysql_db_tables['tenant_assets']
    devices_table_name = proj_config.mysql_db_tables['devices']

    # Fire up the database access objects
    database_name = user_config.access_info['mysql_database']['database']
    cnx = mysql_utils.connect_db(database_name=database_name)
    outer_select_cursor = cnx.cursor(buffered=True)
    inner_select_cursor = cnx.cursor(buffered=True)

    mysql_utils.reset_table(table_name=asset_devices_table_name)

    # First get a list of all the assets supported so far. Refresh the asset database table first of all
    mysql_asset_controller.update_tenant_assets_table()
    # And the devices table too since I need data from there too later on
    mysql_device_controller.update_devices_table()

    # And grab all assets ids, names and types (I need those for later)
    sql_select = """SELECT id, name, type FROM """ + str(
        assets_table_name) + """;"""

    # Execute the statement with the select cursor
    outer_select_cursor = mysql_utils.run_sql_statement(
        cursor=outer_select_cursor, sql_statement=sql_select, data_tuple=())

    # Check if any results came back
    if outer_select_cursor.rowcount <= 0:
        error_msg = "Unable to get any results from {0}.{1} with '{2}'...".format(
            str(database_name), str(assets_table_name),
            str(outer_select_cursor.statement))
        asset_devices_log.error(error_msg)
        outer_select_cursor.close()
        inner_select_cursor.close()
        cnx.close()
        raise mysql_utils.MySQLDatabaseException(message=error_msg)

    # Got some results. Process them then
    else:
        # For each valid ASSET Id found in this table, run a query in the ThingsBoard side of things for all DEVICEs that have a relation to that asset and send it to the database
        asset_info = outer_select_cursor.fetchone()

        # Set the common used parameters for the entity-relation call
        entityType = "ASSET"
        relationTypeGroup = "COMMON"
        direction = "FROM"

        # Run this while there are still asset ids to process
        while asset_info:

            # Query for related devices
            api_response = tb_entity_relation_controller.findByQuery(
                entityType=entityType,
                entityId=asset_info[0],
                relationTypeGroup=relationTypeGroup,
                direction=direction)

            # Get rid of all non-Python terms in the response dictionary and cast it as a list too
            relation_list = eval(
                utils.translate_postgres_to_python(api_response.text))

            # Now lets format this info accordingly and send it to the database
            for relation in relation_list:
                # Create a dictionary to store all the data to send to the database. For now it's easier to manipulate one of these and cast it to a tuple just before executing the statement
                data_dict = {
                    "fromEntityType": relation["from"]["entityType"],
                    "fromId": relation["from"]["id"],
                    "fromName": asset_info[1],
                    "fromType": asset_info[2],
                    "toEntityType": relation["to"]["entityType"],
                    "toId": relation["to"]["id"],
                    "toName": None,
                    "toType": None,
                    "relationType": relation["type"],
                    "relationGroup": relation["typeGroup"],
                }

                # As always, take care with the stupid 'description'/'additionalInfo' issue...
                if relation["additionalInfo"] is not None:
                    try:
                        # Try to get a 'description' from the returned dictionary from the 'additionalInfo' sub dictionary
                        data_dict["description"] = relation["additionalInfo"][
                            "description"]
                    # If the field wasn't set, instead of crashing the code
                    except KeyError:
                        # Simply set this field to None and move on with it...
                        data_dict["description"] = None
                else:
                    data_dict["description"] = None

                # And now to get the data to use in the INSERT statement. For that I need to do a quick SELECT first since I need info from a different side too
                if data_dict['toEntityType'] == 'DEVICE':
                    sql_select = """SELECT name, type FROM """ + str(
                        devices_table_name) + """ WHERE id = %s;"""
                elif data_dict['toEntityType'] == 'ASSET':
                    sql_select = """SELECT name, type FROM """ + str(
                        assets_table_name) + """ WHERE id = %s;"""
                else:
                    # Unexpected related entity type: warn about it, write the relation with toName/toType left as NULL and move on to the next one (otherwise the previous SELECT statement would be reused with the wrong data)
                    asset_devices_log.warning(
                        "Got an unexpected related entityType: '{0}'. Writing this relation with toName/toType set to NULL...".format(str(data_dict['toEntityType'])))
                    mysql_utils.add_data_to_table(
                        table_name=asset_devices_table_name, data_dict=data_dict)
                    continue

                data_tuple = (relation['to']['id'], )

                # NOTE: I need to use the inner select cursor because the outer select cursor still has unprocessed results from the previous SELECT execution and using it would delete those
                inner_select_cursor = mysql_utils.run_sql_statement(
                    cursor=inner_select_cursor,
                    sql_statement=sql_select,
                    data_tuple=data_tuple)

                # Check if any results were returned. Use them if so, otherwise replace the missing results with 'Unknown'. In this case there's no point in raising Exceptions if I can't find the actual name or type of the related device
                if inner_select_cursor.rowcount > 0:
                    # I got the data I was looking for from the devices table
                    result = inner_select_cursor.fetchone()
                    data_dict["toName"] = result[0]
                    data_dict["toType"] = result[1]

                # Write the data in the database table
                mysql_utils.add_data_to_table(
                    table_name=asset_devices_table_name, data_dict=data_dict)

            # Finished with the current asset. Fetch the next one and repeat the cycle if it's not None
            asset_info = outer_select_cursor.fetchone()

        # Done with everything, I believe. Close the database access objects and carry on
        outer_select_cursor.close()
        inner_select_cursor.close()
        cnx.close()
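
# Illustrative usage sketch (not part of the original module): wraps the table
# refresh above with the exception types its docstring documents. The logging
# behaviour here is an assumption made only for this sketch, not project code.
def _example_refresh_asset_devices():
    log = ambi_logger.get_logger(__name__)
    try:
        update_asset_devices_table()
    except (mysql_utils.MySQLDatabaseException, utils.ServiceEndpointException, utils.AuthenticationException) as exc:
        log.error("Could not refresh the asset devices table: {0}".format(str(exc)))
        raise exc
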
def add_table_data(data_dict, table_name):
    """
    Method that abstracts the insertion of data into the provided database table. The method maps the provided data dictionary to the available columns in the table identified by table_name. The method uses the table structure as the main
    reference, i.e., it only writes data whose key in the data dictionary has a direct correspondence to a table column in the database. If a table column has no matching key in the data dictionary, a log warning is issued and that column is set
    to NULL, while dictionary keys without a matching column are simply ignored.
    @:param data_dict (dict) - A dict structure, i.e., a key-value arrangement with the data to be added/updated into the database table. IMPORTANT: The table column names were prepared such that there's a one-to-one equivalence between them and
    the expected keys in the data dictionary.
    @:param table_name (str) - The name of the database table where the data dict has to be written into.
    @:raise utils.InputValidationException - If any of the inputs fails initial validation.
    @:raise mysql_utils.MySQLDatabaseException - If any issues occur with the database accesses.
    @:return result (bool) - If the database addition/update was performed successfully, this method returns True. Otherwise, the appropriate exception is raised with the details on why it was raised in the first place.
    """
    log = ambi_logger.get_logger(__name__)

    # Validate inputs
    utils.validate_input_type(data_dict, dict)
    utils.validate_input_type(table_name, str)

    # Prepare the database access objects
    database_name = user_config.access_info['mysql_database']['database']
    cnx = mysql_utils.connect_db(database_name=database_name)
    select_cursor = cnx.cursor(buffered=True)
    change_cursor = cnx.cursor(buffered=True)

    # First, check if the table exists
    sql_select = """SHOW tables FROM """ + str(database_name) + """;"""

    select_cursor = mysql_utils.run_sql_statement(cursor=select_cursor,
                                                  sql_statement=sql_select,
                                                  data_tuple=())

    records = select_cursor.fetchall()

    # The result of the previous fetchall command is a list of one-element tuples, hence why the name whose existence I want to verify is also wrapped in a one-element tuple. The alternative was to flatten the previous list into a list of plain
    # strings, but it is simpler this way
    if (table_name, ) not in records:
        error_msg = "The table name provided: {0} doesn't exist yet in database {1}. Cannot continue.".format(
            str(table_name), str(database_name))
        log.error(error_msg)
        raise utils.InputValidationException(message=error_msg)

    # Okay, the table exists in the database. Get all its columns into a list
    column_list = mysql_utils.get_table_columns(database_name=database_name,
                                                table_name=table_name)

    # And create the standard INSERT statement from it
    sql_insert = mysql_utils.create_insert_sql_statement(
        column_list=column_list, table_name=table_name)

    # Build the respective data tuple by going through all column names and checking if there is a corresponding key in the data dictionary
    data_list = []

    for i in range(0, len(column_list)):
        try:
            # First, try to retrieve a value into the data list by doing a direct retrieval from the data dictionary using the column name as key
            data_list.append(data_dict[column_list[i]])

        # If the current column name doesn't have a matching key in the data dictionary, catch the expected Exception
        except KeyError:
            # And replace the missing value with a None since, by default, all table columns were created in a way where they hold such value
            # But warn the user first
            log.warning(
                "Didn't find any '{0}' key in the data dictionary provided. Setting the {0} column in {1}.{2} to NULL"
                .format(str(column_list[i]), str(database_name),
                        str(table_name)))
            # And set the value then
            data_list.append(None)

    # Done. Proceed with the INSERT
    try:
        change_cursor = mysql_utils.run_sql_statement(
            cursor=change_cursor,
            sql_statement=sql_insert,
            data_tuple=tuple(data_list))

        # Check the outcome of the previous execution. If no columns were changed in the previous statement, raise a 'Duplicate entry' Exception to trigger an UPDATE instead
        if change_cursor.rowcount == 0:
            # No changes to the database table detected. Trigger an UPDATE then
            raise mysql_utils.MySQLDatabaseException(
                message=proj_config.double_record_msg)
        elif change_cursor.rowcount == 1:
            # In this case, all went well. Close the database access objects, commit the changes to the database, inform the user of this and move on
            log.info("Successfully added a new record to {0}.{1}".format(
                str(database_name), str(table_name)))
            cnx.commit()
            select_cursor.close()
            change_cursor.close()
            cnx.close()
            return True

    # Watch out for the typical "Duplicate entry" exception
    except mysql_utils.MySQLDatabaseException as mse:
        if proj_config.double_record_msg in mse.message:
            trigger_column_list = mysql_utils.get_trigger_columns(
                table_name=table_name)

            # Cool. Use this data to get the respective UPDATE statement
            sql_update = mysql_utils.create_update_sql_statement(
                column_list=column_list,
                table_name=table_name,
                trigger_column_list=trigger_column_list)

            # And complete the existing data list by appending to it the values corresponding to the elements in the trigger list
            for trigger_column_name in trigger_column_list:
                try:
                    data_list.append(data_dict[trigger_column_name])
                except KeyError:
                    error_msg = "The value for the trigger column '{0}' cannot be found among the data dictionary elements! Cannot continue!".format(
                        str(trigger_column_name))
                    log.error(error_msg)
                    select_cursor.close()
                    change_cursor.close()
                    cnx.close()
                    raise mysql_utils.MySQLDatabaseException(message=error_msg)

            # Done. Run the UPDATE statement, still looking for duplicate records
            try:
                change_cursor = mysql_utils.run_sql_statement(
                    cursor=change_cursor,
                    sql_statement=sql_update,
                    data_tuple=tuple(data_list))

                # Check the UPDATE execution status
                if change_cursor.rowcount == 0:
                    # If nothing happened, the record already exists in the database. Give a bit of heads up and move on
                    log.warning(
                        "A record with data:\n{0}\n already exists in {1}.{2}. Nothing more to do..."
                        .format(str(data_list), str(database_name),
                                str(table_name)))
                    select_cursor.close()
                    change_cursor.close()
                    cnx.close()
                    return False
                # Else, if more than one record was modified
                if change_cursor.rowcount != 1:
                    error_msg = "Could not execute\n{0}\nin {1}.{2}. Cannot continue..".format(
                        str(change_cursor.statement), str(database_name),
                        str(table_name))
                    log.error(error_msg)
                    select_cursor.close()
                    change_cursor.close()
                    cnx.close()
                    raise mysql_utils.MySQLDatabaseException(message=error_msg)
                else:
                    info_msg = "Updated record with  successfully in {0}.{1}".format(
                        str(database_name), str(table_name))
                    log.info(info_msg)
                    cnx.commit()
                    select_cursor.close()
                    change_cursor.close()
                    cnx.close()
                    return True
            except mysql_utils.MySQLDatabaseException as mse:
                # If a duplicate result was still found with the last UPDATE execution
                if proj_config.double_record_msg in mse.message:
                    # Inform the user with a warning message
                    warn_msg = "The record with "
                    for i in range(0, len(trigger_column_list) - 1):
                        warn_msg += str(trigger_column_list[i]) + " = " + str(
                            data_dict[trigger_column_list[i]]) + ", "

                    warn_msg += str(trigger_column_list[-1]) + " = " + str(data_dict[trigger_column_list[-1]]) + " already exists in {0}.{1}. Nothing to do then..." \
                        .format(str(database_name), str(table_name))

                    log.warning(warn_msg)

                    # Close out all database access objects
                    select_cursor.close()
                    change_cursor.close()
                    cnx.close()

                    # Nothing else that can be done here. Move out
                    return True
                else:
                    # Some other Exception was raised then
                    select_cursor.close()
                    change_cursor.close()
                    cnx.close()

                    # Forward the Exception then
                    raise mse
        else:
            select_cursor.close()
            change_cursor.close()
            cnx.close()
            # Something else must have happened then. Keep on raising the Exception
            raise mse
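
# Illustrative usage sketch (not part of the original module): builds a data
# dictionary whose keys match the column names of a hypothetical 'sensor_readings'
# table and hands it to add_table_data. The table name and column names below are
# assumptions used purely for illustration; real calls must target a table that
# already exists in the configured database.
def _example_add_sensor_reading(sensor_id, reading_value):
    data_dict = {
        "sensorId": sensor_id,          # hypothetical column name
        "readingValue": reading_value   # hypothetical column name
    }
    # Columns without a matching key are set to NULL; a duplicate record triggers an UPDATE instead of an INSERT
    return add_table_data(data_dict=data_dict, table_name="sensor_readings")
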
def get_auth_token(user_type):
    """This is going to be the go-to method in this module. This method receives one of the supported user types ('SYS_ADMIN', 'TENANT_ADMIN' or 'CUSTOMER_USER') and fetches the respective authorization token. What this method does to get it is
    abstracted from the user. This method automatically checks the usual suspects first: database table. If there's any token in there for the provided user type, it then tests it to see if it is still valid. If not, it then tries to use the
    refresh token to issue a valid one and, if that is also not possible, request a new pair of authentication and refresh tokens.
    This method should be integrated into basic service calls to save the user to deal with the whole authorization token logistics
    @:param user_type (str) - One of the following supported user types: sys_admin, tenant_admin, customer_user (the case type of this argument is irrelevant because I will take care of it later on)
    @:raise utils.InputValidationException - If an invalid argument is provided
    @:raise utils.AuthenticationException - If the authentication credentials are not correct
    @:raise utils.ServiceEndpointException - If the call to the remote service fails
    @:raise mysql_utils.MySQLDatabaseException - If problems arise when dealing with the database
    @:return token (str) - A valid authorization token that can be used to authenticate a remote service call"""

    auth_token_log = ambi_logger.get_logger(__name__)

    # Validate the input as a data type and as one of the expected user types
    utils.validate_input_type(user_type, str)

    # Set the user type string to all lower case characters to simplify comparisons from this point on
    user_type = user_type.lower()
    supported_user_types = ['sys_admin', 'tenant_admin', 'customer_user']

    if user_type not in supported_user_types:
        raise utils.InputValidationException(
            "Invalid user type provided: '{0}'. Please provided one of these: {1}"
            .format(str(user_type), str(supported_user_types)))

    # All seems good so far. Lets check the database first
    database_name = user_config.mysql_db_access['database']
    table_name = proj_config.mysql_db_tables['authentication']

    cnx = mysql_utils.connect_db(database_name=database_name)
    select_cursor = cnx.cursor(buffered=True)
    change_cursor = cnx.cursor(buffered=True)

    # Grab the full column list from the database for indexing purposes
    column_list = mysql_utils.get_table_columns(database_name=database_name,
                                                table_name=table_name)

    # Lets see if there's any token already in the database
    sql_select = """SELECT token, refreshToken FROM """ + str(
        table_name) + """ WHERE user_type = %s;"""

    select_cursor = mysql_utils.run_sql_statement(select_cursor, sql_select,
                                                  (user_type, ))

    # Check if any results came back
    if select_cursor.rowcount > 0:
        # I got a valid token pair back. Extract the authorization from it
        auth_token = select_cursor.fetchone()[0]

        # And call the getUser service providing the retrieved token to see if a) the token is still valid and b) the user_type provided matches what the remote API sends back
        token_status_response = tb_auth_controller.getUser(
            auth_token=auth_token)

        # And convert the response body into the expected dictionary for easier access after this point.
        # NOTE: Interesting development in this case: it turns out that I can authorize a user using an authorization token that was issued from a different ThingsBoard installation!!! In other words, I can get an authorization token issued from my
        # local ThingsBoard installation and use it to get a "valid" authentication in the remote ThingsBoard installation. When I say "valid" I mean that the interface accepts the token without any kind of feedback regarding its actual validity. Yet,
        # when I execute a service with it, guess what? I do get an HTTP 200 response but without any data!! This doesn't make any sense and is going to complicate my code a lot! So I need to deal with these awkward cases too...
        # Attempt to do the following only if something was returned back in the text parameter of the response
        token_status_dict = None

        if token_status_response.text != "":
            token_status_dict = eval(
                utils.translate_postgres_to_python(token_status_response.text))

        # This covers the particularly annoying case in which a valid authorization token from a different installation is used. In this case, the installation accepts the token, since it has the expected format, but internally it gets rejected
        # because the credential pair that originated it obviously doesn't match! But somehow the API fails to mention this! Instead, the damn thing accepts the token and even returns HTTP 200 responses to my requests but these come back all
        # empty, presumably because the internal authentication failed... because the tokens are wrong. Gee, what an unnecessary mess... If a case such as that is detected, simply get a new pair of tokens. Most of these cases are solved by
        # forcing a token refresh
        if token_status_response.status_code != 200 or token_status_response.text == "":
            # Check the most usual case for a non-HTTP 200 return: HTTP 401 with sub-errorCode 11 (it's embedded in the response text) - the authorization token has expired
            if token_status_response.status_code == 401 and eval(
                    token_status_response.text)['errorCode'] == 11:
                # Inform the user first
                auth_token_log.warning(
                    "The authorization token for user type = {0} retrieved from {1}.{2} is expired. Requesting new one..."
                    .format(str(user_type), str(database_name),
                            str(table_name)))
            elif token_status_response.text == "":
                auth_token_log.warning(
                    "The authorization provided was issued from a different ThingsBoard installation than this one! Need to issued a new pair..."
                )

            # Use the refresh token to retrieve a new authorization dictionary into the proper variable. No need to provide the refresh token: the tb_auth_controller.refresh_session_token method already takes care of retrieving it from the
            # database. Create a dictionary to call this method by setting all user_types to False except the one that I want
            new_auth_dict = {
                'sys_admin': False,
                'tenant_admin': False,
                'customer_user': False,
                user_type: True
            }

            # If I caught that annoying case in which a valid authorization token from a different ThingsBoard installation was used
            if token_status_response.text == "":
                new_auth_dict = tb_auth_controller.get_session_tokens(
                    sys_admin=new_auth_dict['sys_admin'],
                    tenant_admin=new_auth_dict['tenant_admin'],
                    customer_user=new_auth_dict['customer_user'])
                auth_token_log.info(
                    "Got a new pair of authorization tokens for the {0} ThingsBoard installation."
                    .format(str(user_config.access_info['host'])))
            # Otherwise, it's an expired token case. Deal with it properly then
            # NOTE: The call to the refresh_session_token method already verifies and deals with expired refreshTokens too.
            else:
                new_auth_dict = tb_auth_controller.refresh_session_token(
                    sys_admin=new_auth_dict['sys_admin'],
                    tenant_admin=new_auth_dict['tenant_admin'],
                    customer_user=new_auth_dict['customer_user'])
                auth_token_log.info(
                    "Refreshed the authorization tokens for the {0} ThingsBoard installation."
                    .format(str(user_config.access_info['host'])))

            # From this point on, the process is the same for both cases considered above

            # If I got to this point, then my new_auth_dict has a fresh pair of authorization and refresh tokens under the user_type key entry (the previous call raises an Exception otherwise)
            # In this case, I have the tokens in the database expired. Update these entries before returning the valid authorization token
            sql_update = mysql_utils.create_update_sql_statement(
                column_list=column_list,
                table_name=table_name,
                trigger_column_list=['user_type'])

            # Prepare the data tuple for the UPDATE operation respecting the expected order: user_type, token, token_timestamp, refreshToken, refreshToken_timestamp and user_type again (because of the WHERE clause in the UPDATE)
            update_data_tuple = (user_type, new_auth_dict[user_type]['token'],
                                 datetime.datetime.now().replace(
                                     microsecond=0),
                                 new_auth_dict[user_type]['refreshToken'],
                                 datetime.datetime.now().replace(
                                     microsecond=0), user_type)

            # Execute the statement
            change_cursor = mysql_utils.run_sql_statement(
                change_cursor, sql_update, update_data_tuple)

            # And check the execution results
            if not change_cursor.rowcount:
                error_msg = "Could not update {0}.{1} with '{2}' statement...".format(
                    str(database_name), str(table_name),
                    str(change_cursor.statement))
                auth_token_log.error(error_msg)
                change_cursor.close()
                select_cursor.close()
                cnx.close()
                raise mysql_utils.MySQLDatabaseException(message=error_msg)
            else:
                auth_token_log.info(
                    "Token database information for user_type = '{0}' updated successfully in {1}.{2}!"
                    .format(str(user_type), str(database_name),
                            str(table_name)))
                cnx.commit()
                # Close the database access objects and return the valid token then
                change_cursor.close()
                select_cursor.close()
                cnx.close()
                return new_auth_dict[user_type]['token']
        # Check if the user type in the response returned (it would be under the 'authority' key in the response dictionary) matches the user_type provided (it would be quite weird if it doesn't, but check it anyways)
        elif token_status_dict is not None and token_status_dict[
                'authority'].lower() != user_type:
            auth_token_log.warning(
                "Attention: the authorization token retrieved from {0}.{1} for user type '{2}' provided is actually associated with a '{3}' user type! Resetting..."
                .format(str(database_name), str(table_name), str(user_type),
                        str(token_status_dict['authority'])))
            # Mismatch detected. Assuming that the ThingsBoard API only accepts user types from the set defined and since I've validated the user type provided as argument also, this means that my mismatch is at the MySQL database level,
            # that somehow has a valid authentication token submitted under a valid user type, just not the correct one
            # First, update the user_type in the database for the correct one (the one retrieved from the remote API)
            remote_user_type = token_status_dict['authority']
            # Request an UPDATE SQL template to replace the current user type by the correct remote_user_type
            sql_update = mysql_utils.create_update_sql_statement(
                column_list=['user_type'],
                table_name=table_name,
                trigger_column_list=['user_type'])
            data_tuple = (remote_user_type, user_type)

            # Execute the statement
            change_cursor = mysql_utils.run_sql_statement(
                change_cursor, sql_update, data_tuple)

            # Check if something was done
            if not change_cursor.rowcount:
                error_msg = "Update operation '{0}' in {1}.{2} not successful!".format(
                    str(change_cursor.statement), str(database_name),
                    str(table_name))
                auth_token_log.error(error_msg)
                change_cursor.close()
                select_cursor.close()
                cnx.close()
                raise mysql_utils.MySQLDatabaseException(message=error_msg)
            else:
                # Commit the changes, warn the user, request a new authentication token for the original user_type requested, save it in the database (in a new entry given that the last one was changed) and return the valid authorization token
                # back, which should always be what this method does before exiting (either this or raise an Exception)
                cnx.commit()

                auth_token_log.warning(
                    "Successfully updated user_type = {0} entry to {1} in {2}.{3}. Requesting new authorization token to {0}..."
                    .format(str(user_type), str(remote_user_type),
                            str(database_name), str(table_name)))

                # Set out the flags for the new session token request, setting all user_types to False at first but then switching on to True only the one matching the provided user_type
                new_auth_dict = {
                    'sys_admin': False,
                    'tenant_admin': False,
                    'customer_user': False,
                    user_type: True
                }

                # And now I can request a new session token for only the user_type that I need without having to spell out a different call signature for each possible case. Clever!
                new_auth_dict = tb_auth_controller.get_session_tokens(
                    sys_admin=new_auth_dict['sys_admin'],
                    tenant_admin=new_auth_dict['tenant_admin'],
                    customer_user=new_auth_dict['customer_user'])

                # If I got here, it means that I have a new authorization dictionary with all entries set to None except the one corresponding to the requested user_type. Update the database and return the token back to the user. Since the
                # new_auth_dict is properly filled, I can now ignore the rest of this if-else jungle. The fact that new_auth_dict is not None anymore is going to trigger an INSERT operation with its data into the database
                pass

        # The HTTP status code is a nice 200 OK. Nothing to do but to return the valid token
        else:
            auth_token_log.info(
                "Got a still valid authorization token for user type {0} from {1}.{2}."
                .format(str(user_type), str(database_name), str(table_name)))
            # Close the database structures before returning the token
            select_cursor.close()
            change_cursor.close()
            cnx.close()
            return auth_token

    else:
        # If I get to this point it means that no valid authorization token was found so far in the database. Yet, there is a possibility that some other token request may have been placed in the logic above and now it needs the data retrieved to
        # be sent to the database. I can detect this by looking at the new_auth_dict variable. If it's None, it means that I need to request a new pair of tokens for this user_type.
        # Create a base for the new authorization dictionary by setting all user_types to False initially and then triggering just the one that needs new authorization tokens to True
        new_auth_dict = {
            'sys_admin': False,
            'tenant_admin': False,
            'customer_user': False,
            user_type: True
        }

        # And use this to request a new pair of authorization tokens from the remote API
        new_auth_dict = tb_auth_controller.get_session_tokens(
            sys_admin=new_auth_dict['sys_admin'],
            tenant_admin=new_auth_dict['tenant_admin'],
            customer_user=new_auth_dict['customer_user'])

    # In any case, I should have a new_auth_dict dictionary here with one entry filled in with a valid authorization token. Time to add it to the database
    sql_insert = mysql_utils.create_insert_sql_statement(
        column_list=column_list, table_name=table_name)

    # And create the data tuple by replacing the members in the column_list retrieved before by the corresponding values
    column_list[column_list.index('user_type')] = user_type
    column_list[column_list.index('token')] = new_auth_dict[user_type]['token']
    column_list[column_list.index(
        'token_timestamp')] = datetime.datetime.now().replace(microsecond=0)
    column_list[column_list.index(
        'refreshToken')] = new_auth_dict[user_type]['refreshToken']
    column_list[column_list.index(
        'refreshToken_timestamp')] = datetime.datetime.now().replace(
            microsecond=0)

    # Execute the statement
    change_cursor = mysql_utils.run_sql_statement(change_cursor, sql_insert,
                                                  tuple(column_list))

    if not change_cursor.rowcount:
        error_msg = "Failed to execute '{0}' in {1}.{2}. Exiting...".format(
            str(change_cursor.statement), str(database_name), str(table_name))
        auth_token_log.error(error_msg)
        change_cursor.close()
        select_cursor.close()
        cnx.close()
        raise mysql_utils.MySQLDatabaseException(message=error_msg)
    else:
        cnx.commit()
        auth_token_log.info(
            "Added authorization token from user_type = {0} to {1}.{2} successfully!"
            .format(str(user_type), str(database_name), str(table_name)))
        # Return the token then
        select_cursor.close()
        change_cursor.close()
        cnx.close()
        return new_auth_dict[user_type]['token']
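
# Illustrative usage sketch (not part of the original module): shows the intended
# role of get_auth_token as the token supplier for remote service calls, mirroring
# how other modules in this project pass its result to utils.build_service_calling_info.
# The endpoint below is a placeholder, not a real ThingsBoard service path.
def _example_build_authenticated_call(service_endpoint="/api/some/endpoint"):
    auth_token = get_auth_token(user_type='tenant_admin')
    return utils.build_service_calling_info(auth_token, service_endpoint)
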
def populate_auth_table():
    """Use this method to request a new set of valid authorization tokens for all defined user types and to write them into the database. By default, this method starts by cleaning out any records currently in the authorization table in the
    database since this method is supposed to be called at the start of a new session
    @:raise mysql_utils.MySQLDatabaseException - If there are problems with the access to the database
    @:raise utils.AuthenticationException - If the access credentials in the user_config file are not valid
    @:raise utils.ServiceEndpointException - For problems with the remote API access"""
    populate_auth_log = ambi_logger.get_logger(__name__)

    # Start by cleaning out the authorization table
    database_name = user_config.mysql_db_access['database']
    table_name = proj_config.mysql_db_tables['authentication']
    cnx = mysql_utils.connect_db(database_name=database_name)
    column_list = mysql_utils.get_table_columns(database_name=database_name,
                                                table_name=table_name)
    change_cursor = cnx.cursor(buffered=True)

    populate_auth_log.info("Cleaning out {0}.{1}...".format(
        str(database_name), str(table_name)))

    # Prepare the DELETE statement by deleting all records older than a datetime argument
    sql_delete = """DELETE FROM """ + str(
        table_name) + """ WHERE token_timestamp < %s;"""

    # And execute it providing the current datetime as argument, thus effectively deleting all records in the database (unless some have future timestamps, which makes no sense whatsoever)
    change_cursor = mysql_utils.run_sql_statement(
        change_cursor, sql_delete,
        (datetime.datetime.now().replace(microsecond=0), ))

    if not change_cursor.rowcount:
        populate_auth_log.info(
            "{0}.{1} was already empty. Populating it now...".format(
                str(database_name), str(table_name)))
    else:
        populate_auth_log.info(
            "Deleted {0} records from {1}.{2}. Populating new records now...".
            format(str(change_cursor.rowcount), str(database_name),
                   str(table_name)))

    cnx.commit()

    # Grab a new authorization dictionary
    auth_dict = tb_auth_controller.get_session_tokens(sys_admin=True,
                                                      tenant_admin=True,
                                                      customer_user=True)

    # And populate the database accordingly
    for user_type in auth_dict:
        sql_insert = mysql_utils.create_insert_sql_statement(
            column_list=column_list, table_name=table_name)

        # Add the values to insert in the order in which they are expected, namely:
        # user_type, token, token_timestamp, refreshToken and refreshToken_timestamp
        data_tuple = (user_type, auth_dict[user_type]['token'],
                      datetime.datetime.now().replace(microsecond=0),
                      auth_dict[user_type]['refreshToken'],
                      datetime.datetime.now().replace(microsecond=0))

        # And execute the INSERT
        change_cursor = mysql_utils.run_sql_statement(change_cursor,
                                                      sql_insert, data_tuple)

        # Check if the execution was successful
        if not change_cursor.rowcount:
            error_msg = "Unable to execute '{0}' in {1}.{2}. Exiting...".format(
                str(change_cursor.statement), str(database_name),
                str(table_name))
            populate_auth_log.error(error_msg)
            change_cursor.close()
            cnx.close()
            raise mysql_utils.MySQLDatabaseException(message=error_msg)
        else:
            cnx.commit()
            populate_auth_log.info(
                "Added a pair of authorization tokens for {0} in {1}.{2} successfully!"
                .format(str(user_type), str(database_name), str(table_name)))

    # Done. Inform the user and exit
    populate_auth_log.info("{0}.{1} populated successfully!".format(
        str(database_name), str(table_name)))
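
# Illustrative usage sketch (not part of the original module): populate_auth_table
# is meant to run at the start of a session, after which get_auth_token can serve
# tokens straight from the freshly filled database table. Assuming, for this sketch
# only, that both functions are importable from the same place.
def _example_start_session(user_type='tenant_admin'):
    populate_auth_table()
    return get_auth_token(user_type=user_type)
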
def getCustomerDevices(customer_name, type=None, textSearch=None, idOffset=None, textOffset=None, limit=50):
    """Method that executes a GET request to the device-controller.getCustomerDevice service to the remote API in order to obtain a list of devices associated with the customer identified by 'customer_name'. For now, this method then sends that
    information to be used to update the ambiosensing_thingsboard.thingsboard_devices_tables. This method is but a subset of the getTenantDevices method from this own module in the sense that, by specifying a user during the method call,
    the list of devices returned is limited to just the devices assigned to this customer while the getTenantDevices returns the list of all devices, as long as they are assigned to a tenant, regardless of whom that tenant may be.
    @:type user_types allowed for this service: TENANT_ADMIN, CUSTOMER_USER
    @:param customer_name (str) - The name of the customer as it was defined in its registration in the ThingsBoard interface. This parameter is going to be used to perform SELECT operations in the MySQL database using 'LIKE' clauses so,
    unlike some of the fields in the API service requests, there's some flexibility here for using names that are not exactly identical to what is in the database. The search is going to be applied to the 'name' column of the
    thingsboard_customers_table. Retrieved customer records via this interface are then used to build the service call to the remote API
    @:param type (str) - Use this field to narrow down results based on the type of device to return. The type field is set during the device registration in the ThingsBoard platform and can then be used later to associate various devices to the
    same type (e.g., 'Thermometer', 'luximeter', etc..). The search operation is case-sensitive, i.e., only complete type matches are returned.
    @:param textSearch (str) - Use this field to narrow down the number of returned results based on the 'name' field. Like the previous field, this one is also case-sensitive (only identical matches return results)
    @:param idOffset (str) - Another search field, this time based on the 'id' parameter. It does provide a little more flexibility than the previous search fields, in the sense that it accepts and processes incomplete id strings,
    as long as only some (but not all) of the characters in its final 12-character block are omitted.
    @:param textOffset (str) - Still no clue on what this might be used for...
    @:param limit (int) - Use this field to truncate the number of returned results. If the result set returned from the remote API was truncated for whatever reason, the result dictionary is returned with another dictionary under the
    'nextPageLink' key detailing the results still to be returned and the 'hasNext' key set to True. Otherwise 'nextPageLink' is set to NULL and 'hasNext' to False
    @:raise utils.InputValidationException - For errors during the validation of inputs
    @:raise utils.ServiceEndpointException - For errors during the remote API access
    @:raise Exception - For any other errors
    @:return A HTTP response object containing the following result dictionary:
    {
        "data": [
            {
                customer_device_1_data
            },
            {
                customer_device_2_data
            },
            ...
            {
                customer_device_n_data
            }
        ],
        "nextPageLink": null,
        "hasNext": false
    }

    Each customer_device_data element is a dictionary in the following format:
    customer_device_n_data = {
        "id": {
            "entityType": str,
            "id": str
        },
        "createdTime": int,
        "additionalInfo": null or {
            "description": str
        },
        "tenantId": {
            "entityType": str,
            "id": str
        },
        "customerId": {
            "entityType": str,
            "id": str
        },
        "name": str,
        "type": str,
        "label": str
    }
    """

    customer_device_log = ambi_logger.get_logger(__name__)
    module_table_key = 'customers'
    columns_to_retrieve = ['id']

    # Validate inputs
    try:
        # Start by the mandatory ones first
        utils.validate_input_type(customer_name, str)
        utils.validate_input_type(limit, int)
        if type:
            utils.validate_input_type(type, str)
        if textSearch:
            utils.validate_input_type(textSearch, str)
        if idOffset:
            utils.validate_input_type(idOffset, str)
        if textOffset:
            utils.validate_input_type(textOffset, str)
    except utils.InputValidationException as ive:
        customer_device_log.error(ive.message)
        raise ive

    # Check the number passed in the limit argument for consistency
    if limit <= 0:
        error_msg = "Invalid limit provided: {0}. Please provide a greater than zero limit value!".format(str(limit))
        customer_device_log.error(error_msg)
        raise utils.InputValidationException(message=error_msg)

    # Before going any further, there is a limiting factor here: the customer id. I need to use the customer_name parameter to fetch it through a database consultation. The customer search is going to be an exhaustive one: I'll first try to search
    # for the customer_name that was passed as is. If a single result is returned - the desired outcome - cool, move on. If not, try to add a wildcard character at the end of customer_name (customer_name%), then to just the beginning (
    # %customer_name) and, if I still can't find a single result, try one last time with wildcard on both ends of the string (%customer_name%) in order to get an unique record (multiple records returned are also disregarded). If no clear answer is
    # obtained thus far, raise an Exception with this information
    # Connect to the MySQL database
    cnx = mysql_utils.connect_db(user_config.mysql_db_access['database'])

    # And get a buffered cursor to run SQL statements
    select_cursor = cnx.cursor(buffered=True)

    # Build the SQL SELECT statement to execute in the MySQL database context
    sql_select = """SELECT """ + ", ".join(columns_to_retrieve) + """ FROM """ + str(proj_config.mysql_db_tables[module_table_key]) + """ WHERE name LIKE %s;"""

    # Run the statement and check what comes back
    select_cursor = mysql_utils.run_sql_statement(select_cursor, sql_select, (str(customer_name),))

    # If I got a single result from the last SQL execution, I don't need to retrieve the record itself to check it: the cursor retains the number of records found in the statement that was just executed in its rowcount internal variable (which is
    # effectively the same as running a SELECT COUNT(*) instead)

    if select_cursor.rowcount != 1:
        # If the last statement failed, try again with a wildcard character at the end of the customer_name
        customer_device_log.warning("Unable to get an unique result searching for a customer_name = {0} (got {1} results instead). Trying again using customer_name = {2}..."
                                    .format(str(customer_name), str(select_cursor.rowcount), str(customer_name + "%")))
        select_cursor = mysql_utils.run_sql_statement(select_cursor, sql_select, (str(customer_name + "%"),))

        if select_cursor.rowcount != 1:
            customer_device_log.warning("Unable to get an unique result searching for a customer_name = {0} (got {1} result instead). Trying again using customer_name = {2}..."
                                        .format(str(customer_name + "%"), str(select_cursor.rowcount), str("%" + customer_name)))
            select_cursor = mysql_utils.run_sql_statement(select_cursor, sql_select, (str("%" + customer_name),))

            if select_cursor.rowcount != 1:
                customer_device_log.warning("Unable to get an unique result searching for a customer_name = {0} (got {1} result instead). Trying again using customer_name = {2}..."
                                            .format(str("%" + customer_name), str(select_cursor.rowcount), str("%" + customer_name + "%")))
                select_cursor = mysql_utils.run_sql_statement(select_cursor, sql_select, (str("%" + customer_name + "%"),))

                if select_cursor.rowcount != 1:
                    error_msg = "The method was unable to retrieve an unique record for customer_name = {0} (got {1} results instead). Nowhere to go but out now..."\
                        .format(str("%" + customer_name + "%"), str(select_cursor.rowcount))
                    customer_device_log.error(error_msg)
                    exit(-1)

    # If my select_cursor was able to go through the last flurry of validation, retrieve the result obtained
    result = select_cursor.fetchone()

    # The SQL SELECT result returns records as n-element tuples, where n is the number of columns returned. The SQL statement in this method queries for a single column: 'id', so any result returned should be a single element tuple
    customer_id = str(result[0])

    # I now have everything that I need to place a call to the remote API service. Build the service endpoint
    service_endpoint = "/api/customer/{0}/devices?".format(customer_id)

    url_strings = []
    if type:
        # Don't forget to escape the url strings characters to URL-compatible characters, including the '/' character for '%2F'
        url_strings.append("type=" + urllib.parse.quote(type.encode('UTF-8')).replace('/', '%2F'))
    if textSearch:
        url_strings.append("textSearch=" + urllib.parse.quote(textSearch.encode('UTF-8')).replace('/', '%2F'))
    if idOffset:
        url_strings.append("idOffset=" + urllib.parse.quote(idOffset.encode('UTF-8')).replace('/', '%2F'))
    if textOffset:
        url_strings.append("textOffset=" + urllib.parse.quote(textOffset.encode('UTF-8')).replace('/', '%2F'))
    url_strings.append("limit=" + str(limit))

    # Concatenate all the gathered url_strings together with the rest of the service_endpoint, using '&' as a separator
    service_endpoint += '&'.join(url_strings)

    # Get the request dictionary using a REGULAR type authorization token
    service_dict = utils.build_service_calling_info(mac.get_auth_token(user_type='tenant_admin'), service_endpoint)

    # Query the remote API
    try:
        response = requests.get(url=service_dict['url'], headers=service_dict['headers'])
    except (requests.exceptions.ConnectionError, requests.exceptions.ConnectTimeout) as ce:
        error_msg = "Could not get a response from {0}...".format(str(service_dict['url']))
        customer_device_log.error(error_msg)
        raise utils.ServiceEndpointException(message=ce)

    # If I got a response, check first if it was the expected HTTP 200 OK
    if response.status_code != 200:
        error_msg = "Request unsuccessful: Received an HTTP " + str(eval(response.text)['status']) + " with message: " + str(eval(response.text)['message'])
        customer_device_log.error(error_msg)
        raise utils.ServiceEndpointException(message=error_msg)
    else:
        # It appears I got a valid result. Check if the number of results returned was truncated by the limit parameter. If so, warn the user only (there's no need to raise Exceptions on this matter)
        # Translate the response text to Python-speak first (e.g. 'true'/'false'/'null' into their Python equivalents) before going for the comparison
        if eval(utils.translate_postgres_to_python(response.text))['hasNext']:
            customer_device_log.warning("Only {0} results returned. There are still results to return from the remote API side. Increase the 'limit' argument to obtain them.".format(str(limit)))

    # I'm good then. Return the result set back
    return response
def retrieve_asset_id(asset_name):
    """
    This method receives the name of an asset and infers the associated id from it by consulting the respective database table.
    :param asset_name: (str) The name of the asset to retrieve from ambiosensing_thingsboard.tb_tenant_assets. This method can only implement the limited searching capabilities provided by the MySQL database. If these are not enough to retrieve an
    unique record associated to the asset name provided, the method 'fails' (even though there might be a matching record in the database but with some different uppercase/lowercase combination than the one provided) and returns 'None' in that case.
    :raise utils.InputValidationException: If the input argument fails the data type validation.
    :raise mysql_utils.MySQLDatabaseException: If any errors occur when accessing the database.
    :return asset_id: (str) If a unique match was found to the provided asset_name. None otherwise.
    """
    log = ambi_logger.get_logger(__name__)

    # Validate the input
    utils.validate_input_type(asset_name, str)

    # Prepare the database access elements
    database_name = user_config.access_info['mysql_database']['database']
    table_name = proj_config.mysql_db_tables['tenant_assets']

    cnx = mysql_utils.connect_db(database_name=database_name)
    select_cursor = cnx.cursor(buffered=True)

    sql_select = """SELECT entityType, id FROM """ + str(
        table_name) + """ WHERE name = %s;"""

    select_cursor = mysql_utils.run_sql_statement(cursor=select_cursor,
                                                  sql_statement=sql_select,
                                                  data_tuple=(asset_name, ))

    # Analyse the execution
    if select_cursor.rowcount == 0:
        log.warning(
            "No records returned from {0}.{1} using asset_name = {2}".format(
                str(database_name), str(table_name), str(asset_name)))
        select_cursor.close()
        cnx.close()
        return None
    elif select_cursor.rowcount > 1:
        log.warning(
            "{0}.{1} returned {2} records for asset_name = {3}. Cannot continue..."
            .format(str(database_name), str(table_name),
                    str(select_cursor.rowcount), str(asset_name)))
        select_cursor.close()
        cnx.close()
        return None
    else:
        # Got a single result back. Check if the entityType matches the expected one
        record = select_cursor.fetchone()
        if record[0] != 'ASSET':
            error_msg = "The record returned from {0}.{1} using asset_name = {2} has a wrong entityType. Got a {3}, expected an 'ASSET'".format(
                str(database_name), str(table_name), str(asset_name),
                str(record[0]))
            log.error(msg=error_msg)
            select_cursor.close()
            cnx.close()
            return None
        else:
            # All is well. Return the retrieved id after validation
            asset_id = record[1]
            utils.validate_id(entity_id=asset_id)

            # If the parameter returned survived the last battery of validations, all seems OK. Return the result
            select_cursor.close()
            cnx.close()
            return asset_id
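# A minimal usage sketch for retrieve_asset_id; the asset name below is hypothetical and assumes a matching record exists in the tenant_assets table:
#   asset_id = retrieve_asset_id(asset_name="Warehouse A")
#   if asset_id is not None:
#       print("Asset id for 'Warehouse A': {0}".format(asset_id))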
def get_asset_env_data(start_date,
                       end_date,
                       variable_list,
                       asset_name=None,
                       asset_id=None,
                       filter_nones=True):
    """
    Use this method to retrieve the environmental data between the two dates specified by the start_date/end_date pair, for each of the variables indicated in the variable list and for an asset identified by at least one of the elements in
    the asset_name/asset_id pair.
    :param start_date: (datetime.datetime) A datetime.datetime object denoting the beginning (the oldest date element) of the time window for data retrieval.
    :param end_date: (datetime.datetime) A datetime.datetime object denoting the end (the newest date element) of the time window for data retrieval.
    :param variable_list: (list of str) A list with the variable names (ontology names) that are to be retrieved from the respective database table. Each one of the elements in the list provided is going to be validated against the 'official'
    list
    in proj_config.ontology_names.
    :param asset_name: (str) The name of the asset entity from where the data is to be retrieved from. This method expects either this parameter or the respective id to be provided and does not execute unless at least one of them is present.
    :param asset_id: (str) The id string associated to an asset element in the database, i.e., the 32 byte hexadecimal string in the usual 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx' format. This method expects either this element or the asset name to
    be provided before continuing. If none are present, the respective Exception is raised.
    :param filter_nones: (bool) Set this flag to True to exclude any None values from the final result dictionary. Otherwise the method returns all values, including NULL/None ones.
    :raise utils.InputValidationException: If any of the inputs fails the initial data type validation or if none of the asset identifiers (name or id) are provided.
    :raise mysql_utils.MySQLDatabaseException: If any errors occur during the database accesses.
    :return response (dict): This method returns a response dictionary in a format that is expected to be serialized and returned as a REST API response further on. For this method, the response dictionary has the following format:
        response =
            {
                env_variable_1: [
                    {
                        timestamp_1: <str>,
                        value_1: <str>
                    },
                    {
                        timestamp_2: <str>,
                        value_2: <str>
                    },
                    ...,
                    {
                        timestamp_N: <str>,
                        value_N: <str>
                    }
                ],
                env_variable_2: [
                    {
                        timestamp_1: <str>,
                        value_1: <str>
                    },
                    {
                        timestamp_2: <str>,
                        value_2: <str>
                    },
                    ...,
                    {
                        timestamp_N: <str>,
                        value_N: <str>
                    }
                ],
                ...
                env_variable_N: [
                    {
                        timestamp_1: <str>,
                        value_1: <str>
                    },
                    {
                        timestamp_2: <str>,
                        value_2: <str>
                    },
                    ...,
                    {
                        timestamp_N: <str>,
                        value_N: <str>
                    }
                ]
            }
    """
    log = ambi_logger.get_logger(__name__)

    # Validate inputs

    # Check if at least one element from the pair asset_name/asset_id was provided
    if not asset_name and not asset_id:
        error_msg = "Missing both asset name and asset id from the input parameters. Cannot continue until at least one of these is provided."
        log.error(error_msg)
        raise utils.InputValidationException(message=error_msg)

    if asset_name:
        utils.validate_input_type(asset_name, str)

    if asset_id:
        utils.validate_id(entity_id=asset_id)

    utils.validate_input_type(start_date, datetime.datetime)
    utils.validate_input_type(end_date, datetime.datetime)

    if start_date > datetime.datetime.now():
        error_msg = "The start date provided: {0} is invalid! Please provide a past date to continue...".format(
            str(start_date))
        log.error(error_msg)
        raise utils.InputValidationException(message=error_msg)

    if end_date > datetime.datetime.now():
        error_msg = "The end date provided: {0} is invalid! Please provide a past or current date to continue....".format(
            str(end_date))
        log.error(error_msg)
        raise utils.InputValidationException(message=error_msg)

    if start_date >= end_date:
        error_msg = "Invalid time window for data retrieval provided: {0} -> {1}. Cannot continue until a start_date < end_date is provided!".format(
            str(start_date), str(end_date))
        log.error(msg=error_msg)
        raise utils.InputValidationException(message=error_msg)

    utils.validate_input_type(variable_list, list)
    # Validate and normalize each element, dropping any variable name that is not supported
    validated_variable_list = []
    for variable_name in variable_list:
        # Check if the element is indeed a str as expected
        utils.validate_input_type(variable_name, str)
        # Take the chance to normalize it to all lowercase characters
        variable_name = variable_name.lower()

        # And check if it is indeed a valid element
        if variable_name not in proj_config.ontology_names:
            log.warning(
                "Attention: the environmental variable name provided: {0} is not among the ones supported:\n{1}\nRemoving it from the variable list..."
                .format(str(variable_name), str(proj_config.ontology_names)))
        else:
            validated_variable_list.append(variable_name)

    # Replace the original list with the validated one (removing elements from a list while iterating over it, as before, skips entries)
    variable_list = validated_variable_list

    # Check that the last operation didn't empty the whole environmental variable list
    if len(variable_list) == 0:
        error_msg = "The variable list is empty! Cannot continue until at least one valid environmental variable is provided"
        log.error(error_msg)
        raise utils.InputValidationException(message=error_msg)

    # The asset_id is one of the most important parameters in this case. Use the provided arguments to either obtain it or make sure the one provided is a valid one. If both parameters were provided (asset_id and asset_name), the id takes precedence
    if asset_id:
        # If an asset id was provided, use it to obtain the associated name
        asset_name_db = retrieve_asset_name(asset_id=asset_id)

        # Check if any names were obtained above and, if so, check if it matches any asset name also provided
        if asset_name:
            if asset_name != asset_name_db:
                log.warning(
                    "The asset name obtained from {0}.{1}: {2} does not matches the one provided: {3}. Defaulting to {2}..."
                    .format(
                        str(user_config.access_info['mysql_database']
                            ['database']),
                        str(proj_config.mysql_db_tables['tenant_assets']),
                        str(asset_name_db), str(asset_name)))
                asset_name = asset_name_db

    if not asset_id and asset_name:
        # Another case: only the asset name was provided but no associated id. Use the respective method to retrieve the asset id from the name
        asset_id = retrieve_asset_id(asset_name=asset_name)

        # Check if a valid id was indeed returned (not None)
        if not asset_id:
            error_msg = "Invalid asset id returned from {0}.{1} using asset_name = {2}. Cannot continue...".format(
                str(user_config.access_info['mysql_database']['database']),
                str(proj_config.mysql_db_tables['tenant_assets']),
                str(asset_name))
            log.error(msg=error_msg)
            raise utils.InputValidationException(message=error_msg)

    utils.validate_input_type(filter_nones, bool)

    # Initial input validation cleared. Before moving any further, set up the database access objects needed to retrieve the devices associated to the asset in question
    database_name = user_config.access_info['mysql_database']['database']
    asset_device_table_name = proj_config.mysql_db_tables['asset_devices']
    device_data_table_name = proj_config.mysql_db_tables['device_data']

    cnx = mysql_utils.connect_db(database_name=database_name)
    select_cursor = cnx.cursor(buffered=True)

    # All ready for data retrieval. First, retrieve the device_id for every device associated to the given asset
    sql_select = """SELECT toId, toName FROM """ + str(
        asset_device_table_name
    ) + """ WHERE fromEntityType = %s AND fromId = %s AND toEntityType = %s;"""

    select_cursor = mysql_utils.run_sql_statement(cursor=select_cursor,
                                                  sql_statement=sql_select,
                                                  data_tuple=('ASSET',
                                                              asset_id,
                                                              'DEVICE'))

    # Analyse the execution results
    if select_cursor.rowcount == 0:
        error_msg = "Asset (asset_name = {0}, asset_id = {1}) has no devices associated to it! Cannot continue...".format(
            str(asset_name), str(asset_id))
        log.error(msg=error_msg)
        select_cursor.close()
        cnx.close()
        raise mysql_utils.MySQLDatabaseException(message=error_msg)
    else:
        log.info(
            "Asset (asset_name = {0}, asset_id = {1}) has {2} devices associated."
            .format(str(asset_name), str(asset_id),
                    str(select_cursor.rowcount)))

    # Extract the device ids to a list for easier iteration later on
    record = select_cursor.fetchone()
    device_id_list = []

    while record:
        device_id_list.append(record[0])

        # Grab another one
        record = select_cursor.fetchone()

    # Prepare a string with one 'deviceId = %s' placeholder per device_id retrieved so far, joined by OR statements, to be spliced into the SQL SELECT statement executed later on
    device_id_string = []

    for _ in device_id_list:
        device_id_string.append("deviceId = %s")

    # And now connect them all into a single string stitched together with 'OR's
    device_where_str = """ OR """.join(device_id_string)
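    # A quick hedged illustration: for three devices the loop above produces
    #   device_where_str = "deviceId = %s OR deviceId = %s OR deviceId = %s"
    # with the actual id values supplied later through the data tuple, so the statement stays parameterized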

    # Store the full results in this dictionary
    result_dict = {}

    # Prepare and run one SQL SELECT per environmental variable in the list
    for i in range(0, len(variable_list)):
        # Build the SELECT statement for the current environmental variable
        sql_select = """SELECT timestamp, value FROM """ + str(
            device_data_table_name) + """ WHERE ontologyId = %s AND (""" + str(
                device_where_str
            ) + """) AND (timestamp >= %s AND timestamp <= %s);"""

        # Prepare the data tuple by joining together the current ontologyId with all the deviceIds retrieved from before
        data_tuple = tuple([variable_list[i]] + device_id_list + [start_date] +
                           [end_date])

        select_cursor = mysql_utils.run_sql_statement(cursor=select_cursor,
                                                      sql_statement=sql_select,
                                                      data_tuple=data_tuple)

        # Analyse the execution outcome
        if select_cursor.rowcount > 0:
            # Results came back for this particular ontologyId. For this method, the information of the device that made the measurement is irrelevant. Create a dictionary entry for the ontologyId parameter and populate the list of dictionaries
            # with the data retrieved
            result_dict[variable_list[i]] = []

            # Process the database records
            record = select_cursor.fetchone()

            while record:
                # Check if the filtering flag is set
                if filter_nones:
                    # And if so, check if the current record has a None as its value
                    if record[1] is None:
                        # If so, grab the next record and skip the rest of this cycle
                        record = select_cursor.fetchone()
                        continue

                result_dict[variable_list[i]].append({
                    "timestamp": str(int(record[0].timestamp())),
                    "value": str(record[1])
                })

                # Grab the next record in line
                record = select_cursor.fetchone()

    # All done it seems. Close down the database access elements and return the results so far
    select_cursor.close()
    cnx.close()
    return result_dict
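# A minimal usage sketch for get_asset_env_data; the asset name, variable names and time window are hypothetical, and the variable names must be present in proj_config.ontology_names for the call to go through:
#   end = datetime.datetime.now()
#   start = end - datetime.timedelta(days=1)
#   env_data = get_asset_env_data(start_date=start, end_date=end, variable_list=["temperature", "humidity"], asset_name="Warehouse A")
#   for variable, measurements in env_data.items():
#       print("{0}: {1} measurements retrieved".format(variable, len(measurements)))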
def refresh_session_token(sys_admin=False,
                          tenant_admin=False,
                          customer_user=False):
    """This method is analogous to the get_session_token one but using the refreshToken, that is assumed to be in the respective database already, to get a valid authorization token without needing to provide the access credentials again,
    thus a more secure way to keep sessions active. This requires at least one of the input argument flags to be set to function. All three currently supported user_types can be refreshed by one call to this method, as long as the flags are set.
    The idea here being that, when requested, the remote API returns a pair of authentication token/refresh token in which the authentication token as a shorter validity period than the refresh token. But once the authentication token get expired,
    if a call to this method is placed before the refresh token also expires (which also happened, though later than the first one) allows to reset the whole thing, since using the refresh token routine results in a new, fresh pair with both
    expiration periods reset
    @:param sys_admin (bool) - Flag to set a refresh on the tokens for the SYS_ADMIN
    @:param tenant_admin (bool) - Flag to set a refresh on the tokens for the TENANT_ADMIN
    @:param customer_user (bool) - Flag to set a refresh on the tokens for the CUSTOMER_USER
    @:raise utils.AuthenticationException - If the access credentials cannot be used to retrieve authentication data
    @:raise utils.ServiceCallException - If errors happen when accessing the remote service
    @:raise utils.InputValidationException - If an invalid argument is provided (data type wise)
    @:raise mysql_utils.MySQLDatabaseException - If errors happen with the database access or with the integrity of the data in the database
    @:return auth_dict (dict) - An authentication dictionary in the same format used so far:
        auth_dict = {
            'sys_admin': {
                'token': str,
                'refreshToken': str
            },
            'tenant_admin': {
                'token': str,
                'refreshToken': str
            },
            'customer_user': {
                'token': str,
                'refreshToken': str
            }
        }
    As before, omitted user types have their sub dictionary set to None. This method only returns this structure back. It's up to the calling method to update the database table, to keep things as decoupled as possible at this point
    """
    refresh_token_log = ambi_logger.get_logger(__name__)

    # Check first if at least one of the argument flags was set, after validation that is
    if sys_admin:
        utils.validate_input_type(sys_admin, bool)

    if tenant_admin:
        utils.validate_input_type(tenant_admin, bool)

    if customer_user:
        utils.validate_input_type(customer_user, bool)

    # If all argument flags are False
    if not (sys_admin or tenant_admin or customer_user):
        error_msg = "No user types set. Please set one of the argument flags when calling this method..."
        refresh_token_log.error(error_msg)
        raise utils.InputValidationException(message=error_msg)

    # Retrieve the existing tokens from the database into the typical authentication dictionary
    auth_dict = {
        'sys_admin': None,
        'tenant_admin': None,
        'customer_user': None
    }

    # Add the relevant user type strings to a list for iterating
    user_type_list = []

    if sys_admin:
        user_type_list.append('sys_admin')

    if tenant_admin:
        user_type_list.append('tenant_admin')

    if customer_user:
        user_type_list.append('customer_user')

    # And grab the results from the database
    database_name = user_config.mysql_db_access['database']
    table_name = proj_config.mysql_db_tables['authentication']

    cnx = mysql_utils.connect_db(database_name=database_name)
    select_cursor = cnx.cursor(buffered=True)
    select_column = 'user_type'

    sql_select = """SELECT * FROM """ + str(table_name) + """ WHERE """

    data_tuple = tuple(user_type_list)
    where_list = []

    for i in range(0, len(user_type_list)):
        where_list.append(select_column + " = %s")

    sql_select += """ OR """.join(where_list) + """;"""

    # Execute the statement then
    select_cursor = mysql_utils.run_sql_statement(select_cursor, sql_select,
                                                  data_tuple)

    # Check if any results were returned
    if not select_cursor.rowcount:
        error_msg = "The statement '{0}' didn't return any results from {1}.{2}.".format(
            str(select_cursor.statement), str(database_name), str(table_name))
        refresh_token_log.error(error_msg)
        select_cursor.close()
        cnx.close()
        raise mysql_utils.MySQLDatabaseException(message=error_msg)
    # Move on if you got any results back
    else:
        # Get the list of column names from the authentication table to use as a reference to obtain the data I'm looking for
        column_list = mysql_utils.get_table_columns(
            database_name=database_name, table_name=table_name)

        # Create the base endpoint: to get a new pair of authorization tokens from an expired authorization token and a still valid refresh token, provide both tokens (with the refreshToken sent as the data payload) to the '/token' endpoint
        service_endpoint = '/api/auth/token'

        # Now I can pick up a record at a time, refresh the authentication token and update the return dictionary with the reply
        result = select_cursor.fetchone()

        # Do the following as long as there is a non-None element returned from the database cursor
        while result:
            # Start by getting the standard service call structures
            con_dict = utils.build_service_calling_info(
                auth_token=result[column_list.index('token')],
                service_endpoint=service_endpoint)

            # Build the additional data payload structure which is needed for this service call in particular
            data = '{"refreshToken": "' + str(
                result[column_list.index('refreshToken')]) + '"}'

            # And call the remote API with the refresh request
            try:
                api_response = requests.post(url=con_dict['url'],
                                             headers=con_dict['headers'],
                                             data=data)
            except (requests.exceptions.ConnectionError,
                    requests.exceptions.ConnectTimeout):
                error_msg = "Unable to establish a connection with {0}:{1}. Exiting...".format(
                    str(user_config.thingsboard_host),
                    str(user_config.thingsboard_port))
                refresh_token_log.error(error_msg)
                select_cursor.close()
                cnx.close()
                raise utils.ServiceEndpointException(message=error_msg)

            # If a non-HTTP 200 status code was returned, it's probably a credential issue. Raise an exception with the proper information in it
            if api_response.status_code != 200:
                # Check first if the status code is HTTP 401 and if the sub-errorCode is 11, which means that the refresh token for this user_type is also expired. In this case I can always request a new pair instead of raising an Exception
                if api_response.status_code == 401 and eval(
                        api_response.text)['errorCode'] == 11:
                    user_type = result[column_list.index('user_type')]
                    refresh_token_log.warning(
                        "The refresh token for user_type '{0}' is also expired. Requesting a new pair..."
                        .format(str(user_type)))

                    # Request a new pair of authorization tokens, but only for the user_type whose tokens have expired, so that the other user_types' tokens currently in the database don't get invalidated by forcing the issue of a new pair. The
                    # get_session_tokens method either returns a valid pair of authorization tokens or raises an Exception with the reason why it couldn't do it in the first place, so there's no need to verify the next call's results
                    auth_dict = get_session_tokens(
                        sys_admin=(user_type == 'sys_admin'),
                        tenant_admin=(user_type == 'tenant_admin'),
                        customer_user=(user_type == 'customer_user'))

                    return auth_dict

                # If the error was something other than an HTTP 401 with a sub-errorCode of 11, raise an Exception with the error details
                else:
                    refresh_token_log.error(api_response.text)
                    select_cursor.close()
                    cnx.close()
                    raise utils.AuthenticationException(
                        message=api_response.text,
                        error_code=api_response.status_code)

            # Got a pair of valid tokens back. Update the structures then
            else:
                auth_dict[result[column_list.index('user_type')]] = eval(
                    api_response.text)

            # And grab the next result for another iteration of this
            result = select_cursor.fetchone()

        # The while loop is done here and I've processed all results thus far. All that's left to do is return the updated authorization dictionary
        return auth_dict
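# A minimal usage sketch for refresh_session_token; it assumes the authentication table already holds token/refreshToken pairs for the requested user types:
#   auth_dict = refresh_session_token(tenant_admin=True)
#   if auth_dict['tenant_admin'] is not None:
#       print("New tenant_admin token: {0}".format(auth_dict['tenant_admin']['token']))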