def retrieve_asset_name(asset_id):
    """
    This method mirrors the previous one: it receives an asset_id and returns the associated asset name, if any.
    :param asset_id: (str) A 32-byte hexadecimal string in the expected xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format. This element is configured in the database as the primary key, which means that this method should never match multiple records.
    :raise utils.InputValidationException: If the input argument fails the initial data type validation
    :raise mysql_utils.MySQLDatabaseException: If any errors occur when consulting the database.
    :return asset_name: (str) The asset name if a unique match was found, None otherwise
    """
    log = ambi_logger.get_logger(__name__)

    # Validate the input
    utils.validate_id(entity_id=asset_id)

    # Prepare the database access elements
    database_name = user_config.access_info['mysql_database']['database']
    table_name = proj_config.mysql_db_tables['tenant_assets']

    cnx = mysql_utils.connect_db(database_name=database_name)
    select_cursor = cnx.cursor(buffered=True)

    sql_select = """SELECT entityType, name FROM """ + str(
        table_name) + """ WHERE id = %s;"""
    select_cursor = mysql_utils.run_sql_statement(cursor=select_cursor,
                                                  sql_statement=sql_select,
                                                  data_tuple=(asset_id, ))

    # Analyse the execution
    if select_cursor.rowcount == 0:
        log.warning(
            "No records returned from {0}.{1} using asset_id = {2}".format(
                str(database_name), str(table_name), str(asset_id)))
        select_cursor.close()
        cnx.close()
        return None
    elif select_cursor.rowcount > 1:
        log.warning(
            "{0}.{1} returned {2} records using asset_id = {3}. Cannot continue..."
            .format(str(database_name), str(table_name),
                    str(select_cursor.rowcount), str(asset_id)))
        select_cursor.close()
        cnx.close()
        return None
    else:
        # A single return came back. Process it then
        record = select_cursor.fetchone()
        if record[0] != 'ASSET':
            error_msg = "The record returned from {0}.{1} using asset_id = {2} has a wrong entityType. Got a '{3}', expected an 'ASSET'".format(
                str(database_name), str(table_name), str(asset_id),
                str(record[0]))
            log.error(msg=error_msg)
            select_cursor.close()
            cnx.close()
            return None
        else:
            # All is good so far. Check if the name returned is indeed a str and return it if so
            asset_name = record[1]
            utils.validate_input_type(asset_name, str)
            select_cursor.close()
            cnx.close()
            return asset_name
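

# Hedged usage sketch (not part of the original module): assumes the imports used above (utils, mysql_utils, user_config, proj_config, ambi_logger) are in scope and that the tenant_assets table is populated. The asset id below is illustrative only.
def _demo_retrieve_asset_name():
    sample_id = '3806ac00-5411-11ea-aa0c-135d877fb643'
    asset_name = retrieve_asset_name(asset_id=sample_id)
    if asset_name is None:
        print("No unique asset found for id = {0}".format(sample_id))
    else:
        print("Asset {0} maps to name '{1}'".format(sample_id, asset_name))
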
def convert_timestamp_tb_to_datetime(timestamp):
    """This method converts a specific timestamp from a ThingsBoard remote API request (which has one of the weirdest formats that I've seen around) and returns a datetime object that can be interpreted by the DATETIME data format,
    which way more human readable that the POSIX timestamp that is being used in the ThingsBoard Postgres database.
    databases, i.e., YYYY-MM-DD hh:mm:ss, which also corresponds to the native datetime.datetime format from python
    @:param timestamp (int) - This is one of the trickiest elements that I've found so far. The ThingsBoard internal data is stored in a Postgres database. I'm assuming that is the one behind the data format returned by the remote API. Whatever
    it may be, it returns a 13 digit integer as the timestamp. A quick analysis suggests that this is a regular POSIX timestamp, i.e., the number of seconds from 1970-01-01 00:00:00 until whenever that data was inserted in the database.
    There are literally loads of different and straightforward ways to convert this value into a human-readable datetime. Yet none of them seemed to work with this particular value. In fact, none of the timestamps returned from the remote
    API was able to be converted into a datetime. And the reason is stupid as hell! It seems that, if you bother to count all seconds from 1970 until today, you get a number with 10 digits... and you have been getting that for quite some
    time given how long has to pass to add a new digit to this value. A bit more of investigation showed that, as well with regular datetime elements, POSIX timestamps also indicate the number of microseconds elapsed, but normally that is
    expressed as a 17 digit float in which the last 5 are the decimal part, i.e., the microseconds, but there's an obvious decimal point w«in those cases where the POSIX timestamp also has the number of microseconds. The only reasonable
    explanation (though somewhat weird in its own way) is that the value returned by the remote API contains 3 decimal digits and, for whatever reason behind it, the decimal point is omitted. It turns out that this is exactly what is going
    on! So I need to do extra flexing with this one... The method expects the 13 digit integer that comes straight from the remote API call and then itself does whatever needs to return a meaningful datetime
    @:return data_datetime (datetime.datetime) - A regular datetime object that can be sent directly to a MySQL database expecting a DATETIME field (YYYY-MM-DD hh:mm:ss)
    @:raise utils.InputValidationException - If there is something wrong with the validation of inputs
    """
    times2date_log = ambi_logger.get_logger(__name__)

    utils.validate_input_type(timestamp, int)

    # Given how weird the datetime values returned by the ThingsBoard API are, I'm going to be extra careful with this one
    if len(str(timestamp)) != 13:
        error_msg = "Please provide the full value for the timestamp returned by the remote API (expecting a 13 digit int, got {0} digits).".format(
            str(len(str(timestamp))))
        times2date_log.error(error_msg)
        raise utils.InputValidationException(message=error_msg)

    # All appears to be in good order so far. From here I could simply divide the timestamp value by 1000 to get it to 10 integer digits (+3 decimal) but I'm not particularly concerned about milliseconds, really. So, might as well drop the
    # last 3 digits of the timestamp and call it a day (the integer floor division by 1000 effectively truncates the decimal part, thus achieving the desired outcome)
    timestamp = timestamp // 1000

    # The rest is trivial
    return datetime.datetime.fromtimestamp(timestamp)
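

# Hedged usage sketch: a 13 digit ThingsBoard timestamp is just a POSIX timestamp in milliseconds, so dropping the last three digits recovers a regular 10 digit seconds value. The sample value below is illustrative.
def _demo_convert_timestamp_tb():
    sample_tb_timestamp = 1582230020123  # 10 digits of seconds + 3 digits of milliseconds
    converted = convert_timestamp_tb_to_datetime(sample_tb_timestamp)
    # Prints a local-time datetime in the YYYY-MM-DD hh:mm:ss format
    print(converted)
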
def get_device_credentials(device_name):
    """
    This method simplifies a recurring task: determining the entityType, entityId and timeseriesKeys associated to the device identified by 'device_name'.
    Since most remote services provided by the Thingsboard remote API are fond of using this entityType/entityId pair to uniquely identify every entity configured in the platform (tenants, customers, devices, etc.) while we humans are more keen to
    rely on names to identify the same entities, this method integrates both approaches for the devices context: it searches the current database entries for the name provided and, if a unique result is obtained, returns its associated entityType,
    entityId and timeseriesKeys.
    @:param device_name (str) - The name of the device, as it should be defined via the 'name' field in the respective Thingsboard installation
    @:raise utils.InputValidationException - If the input fails initial validation
    @:raise mysql_utils.MySQLDatabaseException - If the database access runs into problems
    @:return None if no single device could be found or if there are no credentials currently configured in the database for this device. Otherwise, the credentials are going to be returned in the following format:
    device_credentials = (entityType (str), entityId (str), timeseriesKeys(list of str))
    """
    log = ambi_logger.get_logger(__name__)

    utils.validate_input_type(device_name, str)

    # Create the typical database access objects
    database_name = user_config.access_info['mysql_database']['database']
    table_name = proj_config.mysql_db_tables['devices']
    cnx = mysql_utils.connect_db(database_name=database_name)
    select_cursor = cnx.cursor(buffered=True)

    # Now for the tricky part: to run a SQL SELECT using the device name as the only filter while ensuring that one and only one record is returned. For this I'm going to use the SQL 'LIKE' operator with all four wildcard options in the search
    # field: 'device_name', '%device_name', 'device_name%' and '%device_name%'.
    search_vector = [device_name, '%' + device_name, device_name + '%', '%' + device_name + '%']
    sql_select = """SELECT entityType, id, timeseriesKeys FROM """ + table_name + """ WHERE name LIKE %s;"""

    # Run a SQL SELECT with all elements of the search vector, stopping the loop as soon as a single record gets returned back
    result = None
    for search_pattern in search_vector:
        select_cursor = mysql_utils.run_sql_statement(cursor=select_cursor, sql_statement=sql_select, data_tuple=(search_pattern,))

        if select_cursor.rowcount != 1:
            log.warning("Searching for name = {0}. Got {1} results back. Expanding search with new wildcards...".format(str(search_pattern), str(select_cursor.rowcount)))
            # Ignore the rest of this loop in this case then
            continue
        else:
            log.info("Got a unique record while searching for name = {0}. Moving on".format(str(search_pattern)))
            # Set the result parameter
            result = select_cursor.fetchone()
            # Get out of this loop in this case
            break

    # Check if a valid result was found. Continue if so, return None otherwise
    if result is None:
        log.warning("Could not retrieve a unique record for device_name = {0} in {1}.{2}. Nothing more to do...".format(str(device_name), str(database_name), str(table_name)))
        select_cursor.close()
        cnx.close()
        return None
    else:
        # The list of timeseries keys is kept in the database as a single string with all elements separated by a comma: timeseriesKeys = "timeseriesKey1,timeseriesKey2,...,timeseriesKeyN"
        # Extract that string into a proper list of strings and prepare the return tuple
        timeseriesKeys_list = result[2].split(',')

        # All done. Close the database access objects and return the final structure then
        select_cursor.close()
        cnx.close()
        return result[0], result[1], timeseriesKeys_list
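

# Hedged usage sketch: assumes the devices table holds a record whose name matches the (illustrative) string below, at least through one of the '%' wildcard patterns.
def _demo_get_device_credentials():
    credentials = get_device_credentials(device_name='Thermometer A')
    if credentials is None:
        print("No unique device found")
    else:
        entity_type, entity_id, timeseries_keys = credentials
        print("{0}/{1} -> timeseriesKeys: {2}".format(entity_type, entity_id, timeseries_keys))
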
def get_logger(logger_name, file_location=None):
    """This method aggregates the get_console_handler() and the get_file_handler() methods to return a complete logger that, once invoked, sends the log
    messages both to the stdout and the defined log file, with the format defined in the string FORMATTER. NOTE: By providing a new name to this function,
    a new logger object is returned. If the name corresponds to an already existing logger object in memory, its handler is returned instead.
    @:param logger_name (str) - A name that is going to be associated with the logger object (different from the variable that holds the object)
    @:param file_location (optional str) - A valid system path, including the log file name, where these logs should be written to
    @:raise util.InputValidationException - If there's a problem with the input arguments
    @:return logging.Logger - The Logger object if the operation was successful
    """

    try:
        utils.validate_input_type(logger_name, str)
        if file_location:
            # First validate the file location against its most basic data type
            utils.validate_input_type(file_location, str)
            # Then check if the path in question is a valid one too
            try:
                # Try to open the file indicated in the file location argument. If there's something wrong with it, catch the expected exception
                temp_f = open(file_location)
                # Close the file if nothing wrong happens when trying to open it
                temp_f.close()
            except FileNotFoundError as fnf:
                print("The file location provided ({0}) is not correct!".format(str(file_location)))
                raise fnf
    except utils.InputValidationException as ive:
        # At this level, there isn't much I can do about this... send the Exception to the caller
        raise ive

    # Get a basic, un-configured Logger object first
    logger = logging.getLogger(logger_name)

    # Set the logger level to a low one. These loggers operate on six possible logging levels that have a numeric value associated, namely:
    # NOTSET (0)
    # DEBUG (10)
    # INFO (20)
    # WARNING (30)
    # ERROR (40)
    # CRITICAL (50)
    # The numeric value is just a shorthand for the name of the level itself (setting setLevel(logging.DEBUG) or setLevel(10) is the same thing).
    # The way this works is, the base level determines what gets logged or not, based on the log level of the message. Setting this logger to DEBUG level
    # means that all messages with the same level or above are logged but not the ones with lower levels. In this case, a logger.error(message) logs the
    # message because it is logged with an ERROR (40) level but not a NOTSET leveled message.
    logger.setLevel(logging.DEBUG)

    # Add the handlers to the base logger. But first, since this function is going to be repeatedly called along this project, sometimes (depending on the module structure and where exactly the logger is called from)
    # an existing logger is returned instead of a 'fresh' one. This means that it already has some handlers associated to it (assuming it was initially generated from this same module using the get_logger() method that
    # also associates a file and a stream handler to it) and then it gets a new pair every time a logger is created upon an existing one. The result? Every time a logger logs anything (on the console and/or on the log file)
    # it prints the same line once per repeated handler that it has. That's when you start seeing multiple repetitions of the same log line, down to the timestamp, on the console and file. The next couple of instructions
    # simplify the logger creation by wiping out any existing handlers before adding the only two that are needed
    if logger.hasHandlers():
        logger.handlers.clear()

    logger.addHandler(get_console_handler())
    logger.addHandler(get_file_handler(file_location))

    # The logger is now configured and ready to work. Return it
    return logger
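

# Hedged usage sketch: the logger name is typically the calling module's __name__; file_location is optional and, when omitted, get_file_handler() is assumed to fall back to the project's default log file.
def _demo_get_logger():
    demo_log = get_logger(__name__)
    demo_log.info("This line goes both to stdout and to the log file")
    demo_log.debug("DEBUG messages are logged too, since the base level is DEBUG")
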
def validate_database_table_name(table_name):
    """This simple method receives a name of a table and validates it by executing a SQL statement in the default database to retrieve all of its tables and then checks if the table name in the input does match any of the returned values.
    @:param table_name (str) - The name of the database table whose existence is to be verified
    @:raise utils.InputValidationException - If the inputs fail initial validation
    @:raise PostgresDatabaseException - If any errors occur while executing database bound operations or if the table name was not found among the list of database tables retrieved
    @:return True (bool) - If table_name is among the database tables list"""

    validate_db_table_log = ambi_logger.get_logger(__name__)

    # Validate the input
    utils.validate_input_type(table_name, str)

    # Get the default database name
    database_name = user_config.mysql_db_access['database']
    # Create the database interface elements
    cnx = connect_db(database_name=database_name)
    select_cursor = cnx.cursor(buffered=True)

    # Prepare the SQL statement
    sql_select = """SELECT table_name FROM information_schema.tables;"""

    # And execute it
    select_cursor = run_sql_statement(select_cursor, sql_select, ())

    # Check the data integrity first
    if not select_cursor.rowcount:
        error_msg = "The SQL statement '{0}' didn't return any results! Exiting...".format(
            str(select_cursor.query))
        validate_db_table_log.error(error_msg)
        select_cursor.close()
        cnx.close()
        raise PostgresDatabaseException(message=error_msg)
    # If results were gotten
    else:
        # Grab the first one
        result = select_cursor.fetchone()

        # And run the next loop until all results were checked (result would be set to None once all the data retrieved from the database is exhausted)
        while result:
            # If a match is found
            if result[0] == table_name:
                # Close the database access objects and return the response immediately
                select_cursor.close()
                cnx.close()
                return True
            # Otherwise
            else:
                # Grab the next one and run another iteration of this
                result = select_cursor.fetchone()

        # If I got here it means none of the results matched the table_name provided. Close everything and inform the caller that the table name is not valid
        select_cursor.close()
        cnx.close()
        raise PostgresDatabaseException(
            message="The table provided '{0}' is not among the current database tables!"
            .format(str(table_name)))
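

# Hedged usage sketch: validate_database_table_name() either returns True or raises, so callers typically wrap it in a try/except block. The table name below is illustrative, and accessing .message assumes the same attribute the project's other exceptions expose.
def _demo_validate_database_table_name():
    try:
        validate_database_table_name('tenant_assets')
        print("Table exists")
    except PostgresDatabaseException as pde:
        print("Invalid table: {0}".format(pde.message))
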
def get_table_columns(database_name, table_name):
    """This method does a simple SELECT query to the database for just the columns names in a given table. This is particular useful for building INSERT and UPDATE statements that require a specification of these elements on the
    statements
    @:param database (str) - The name of the database to connect to
    @:param table_name (str) - The name of the table from the database to connect to
    @:raise utils.InputValidationException - If any of the inputs is not valid
    @:raise PostgresDatabaseException - for database related exceptions
    @:return column_list (list of str) - A list with all the names of the columns, in order, extracted from the database.table_name
    """
    get_table_log = ambi_logger.get_logger(__name__)

    try:
        utils.validate_input_type(database_name, str)
        utils.validate_input_type(table_name, str)

        cnx = connect_db(database_name=database_name)
        select_cursor = cnx.cursor()

        sql_select = """SELECT column_name FROM information_schema.columns WHERE table_name = %s ORDER BY ordinal_position;"""
        data_tuple = (table_name, )

        select_cursor = run_sql_statement(cursor=select_cursor,
                                          sql_statement=sql_select,
                                          data_tuple=data_tuple)

        if not select_cursor.rowcount:
            error_msg = "Did not retrieve any column information from {0}.{1}. Cannot continue!".format(
                str(database_name), str(table_name))
            get_table_log.error(error_msg)
            raise PostgresDatabaseException(message=error_msg)

        # The last statement, if well executed, returns a list of one element tuples (because the SELECT statement only specified one field in this case). To transform this into a proper list I have to go through all elements of the list and get
        # them out of the tuple, basically...
        # Grab the full list first
        result_list = select_cursor.fetchall()

        # And do the deed then
        return_list = [result[0] for result in result_list]

        # Done. Close the database access objects and send the list back then
        select_cursor.close()
        cnx.close()
        return return_list

    except psycopg2.Error as err:
        get_table_log.error(
            "Got a code {0} error with message: {1} when connecting to {2}.{3}"
            .format(str(err.pgcode), str(err.pgerror), str(database_name),
                    str(table_name)))
        raise PostgresDatabaseException(message=err.pgerror,
                                        error_code=err.pgcode,
                                        diag=err.diag)
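

# Hedged usage sketch: the returned column list dictates both the column names and the number of '%s' placeholders when building INSERT/UPDATE statements. The database and table names below are illustrative.
def _demo_get_table_columns():
    columns = get_table_columns(database_name='ambiosensing', table_name='tenant_assets')
    # e.g. ['id', 'entityType', 'name', ...] - the actual list depends on the schema
    print("Columns: {0}".format(columns))
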
def convert_datetime_to_timestamp_tb(data_datetime):
    """This method is the literal inverse of the previous one: it receives a regular datetime object in the format YYYY-MM-DD hh:mm:ss.xxxx (I'm allowing microseconds in this one, if needed be) and returns the 13 digit timestamp that
    ThingsBoard's Postgres database expects
    @:param data_datetime (datetime.datetime) - A YYYY-MM-DD hh:mm:ss.xxxx representation of a date and a time, consistent with the datetime.datetime class
    @:return timestamp (int) - a 13 digit integer that its actually a 10 digit integer + 3 decimal digits with the decimal period omitted.
    @:raise utils.InputValidationException - For errors with the method's input arguments
    @:raise Exception - For all other errors
    """

    utils.validate_input_type(data_datetime, datetime.datetime)

    # The conversion from datetime.datetime to a POSIX timestamp is direct, but the operation yields a float with a 10 digit integer part (at least for the next couple of decades or so) plus the fractional seconds. So the easiest approach
    # is to multiply the resulting timestamp by 1000, which moves the millisecond digits into the integer part, and then re-cast it to an integer to drop the remaining sub-millisecond digits that I'm not interested in,
    # regardless of exactly how many there were in the beginning
    return int(data_datetime.timestamp() * 1000)
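

# Hedged round-trip sketch, assuming convert_timestamp_tb_to_datetime() is available in the same scope: converting a datetime to a ThingsBoard timestamp and back reproduces the original value down to the second (the millisecond digits are truncated on the way back).
def _demo_datetime_timestamp_roundtrip():
    now = datetime.datetime.now().replace(microsecond=0)
    tb_timestamp = convert_datetime_to_timestamp_tb(now)
    assert len(str(tb_timestamp)) == 13
    assert convert_timestamp_tb_to_datetime(tb_timestamp) == now
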
def getUser(auth_token):
    """This method uses the authorization token transmitted via the service endpoint string to return which type of user the provided token is associated with: SYS_ADMIN, TENANT_ADMIN or CUSTOMER USER. If successful, the API returns a dictionary
    with the user information that is currently stored in the server side database
    @:param auth_token (str) - A string of apparently random characters that was issued preemptively by the remote server after providing it with a pair of username-password access credentials
    @:raise utils.ServiceEndpointException - If the remote call failed. The exception message provides details on the failure reason
    @:return response (requests.models.Response) - The full response object from the HTTP request. If successful, its JSON body (response.json()) has the following structure:
    result_dict = {
          "id": {
            "entityType": str (one from proj_config.thingsboard_supported_entityTypes),
            "id": str (a 32 byte hexadecimal string with the characters/bytes grouped as 8-4-4-4-12)
          },
          "createdTime": int (a POSIX-type timestamp),
          "additionalInfo": {
            "description": str
          },
          "tenantId": {
            "entityType": str (one from proj_config.thingsboard_supported_entityTypes),
            "id": str (and 32 byte hexadecimal string with the characters/bytes grouped as 8-4-4-4-12)
          },
          "customerId": {
            "entityType": str (one from proj_config.thingsboard_supported_entityTypes),
            "id": str (and 32 byte hexadecimal string with the characters/bytes grouped as 8-4-4-4-12)
          },
          "email": str,
          "authority": str (a user type from the set (SYS_ADMIN, TENANT_ADMIN, CUSTOMER_USER),
          "firstName": str,
          "lastName": str,
          "name": str
}
    """
    # Validate the token real quickly
    utils.validate_input_type(auth_token, str)

    # Set the service endpoint
    service_endpoint = '/api/auth/user'

    # Grab the automatic elements needed to call the service (headers, url and such)
    service_dict = utils.build_service_calling_info(
        auth_token=auth_token, service_endpoint=service_endpoint)

    # And execute the damn thing
    response = requests.get(url=service_dict['url'],
                            headers=service_dict['headers'])

    return response
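

# Hedged usage sketch: getUser() returns the raw requests response, so the dictionary described in the docstring has to be extracted from its JSON body.
def _demo_getUser(auth_token):
    response = getUser(auth_token)
    if response.status_code == 200:
        result_dict = response.json()
        print("Authenticated as a {0} user".format(result_dict['authority']))
    else:
        print("Call failed with HTTP {0}".format(response.status_code))
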
def connect_db(database_name):
    """Basic method that return a connection object upon a successful connection attempt to a database whose connection data is present in the configuration file, as a dictionary with the database name as its key
    NOTE: This method assumes a single server instance for the installation of all database data (a single triplet hostname, username and password). For databases that span over multiple servers or to support more than one user in this
    regard, please change this method accordingly
    @:param database_name (str) - The name of the database to connect to.
    @:raise util.InputValidationException - If the input arguments provided are invalid
    @:raise Exception - For any other occurring errors
    @:return cnx (psycopg2.extensions.connection) - An active connection to the database"""
    connect_log = ambi_logger.get_logger(__name__)

    try:
        utils.validate_input_type(database_name, str)
        connection_dict = user_config.access_info['postgres_database']
    except utils.InputValidationException as ive:
        connect_log.error(ive.message)
        raise ive
    except KeyError as ke:
        error_msg = "Missing '{0}' key from the user_config.postgres_db_access dictionary!".format(
            str(database_name))
        connect_log.error(error_msg)
        raise ke

    try:
        cnx = psycopg2.connect(user=connection_dict['username'],
                               password=connection_dict['password'],
                               database=connection_dict['database'],
                               host=connection_dict['host'],
                               port=connection_dict['port'])
    except psycopg2.Error as err:
        connect_log.error(
            "Got a code {0} error with message: {1} when connecting to {2}:{3}.{4}"
            .format(str(err.pgcode), str(err.pgerror),
                    str(connection_dict['host']), str(connection_dict['port']),
                    str(connection_dict['database'])))
        # Catch any errors under a generic 'Error' exception and pass it upwards under a more specific PostgresDatabaseException
        raise PostgresDatabaseException(message=err.pgerror,
                                        error_code=err.pgcode,
                                        diag=err.diag)

    return cnx
def run_sql_statement(cursor, sql_statement, data_tuple=()):
    """The way python runs SQL statements is a bit convoluted, with plenty of moving parts and things that can go wrong. Since I'm going to run plenty of these along this project, it is a good idea to abstract this operation as much as
    possible
    @:param cursor (psycopg2.extensions.cursor) - A cursor object, obtained from an active database connection, that is used by python to run SQL statements as well as to process the results.
    @:param sql_statement (str) - The SQL statement string to be executed, with its values not explicit but replaced by '%s' placeholders instead. This method takes care of these replacements.
    @:param data_tuple (tuple) - A tuple with as many elements as the ones to be replaced in the SQL string. The command that effectively runs the SQL statement takes two arguments: the original SQL string statement with '%s' elements
    instead of its values and a data tuple where those values are indicated in the expected order. The command then sends both elements across to be executed database side in a way that protects their content and integrity (supposedly, it
    wards against SQL injection attacks).
    @:raise utils.InputValidationException - If the input arguments fail their validations
    @:raise PostgresDatabaseException - For errors related to database operations
    @:raise Exception - For any other error that may occur.
    @:return cursor (psycopg2.extensions.cursor) - The same cursor object, now loaded with the results of the executed statement.
    """
    run_sql_log = ambi_logger.get_logger(__name__)

    utils.validate_input_type(sql_statement, str)
    utils.validate_input_type(data_tuple, tuple)
    utils.validate_input_type(cursor, psycopg2.extensions.cursor)

    # Count the number of '%s' in the sql statement string and see if they match with the number of elements in the tuple
    if len(data_tuple) != sql_statement.count('%s'):
        error_msg = "Mismatch between the number of data tuple elements ({0}) and the number of replaceable '%s' in the sql statement string ({1})!".format(
            str(len(data_tuple)), str(sql_statement.count('%s')))
        run_sql_log.error(error_msg)
        raise PostgresDatabaseException(message=error_msg)

    # Done with the validations.
    try:
        cursor.execute(sql_statement, data_tuple)
    except psycopg2.Error as err:
        run_sql_log.error(err.pgerror)
        raise PostgresDatabaseException(message=err.pgerror,
                                        error_code=err.pgcode,
                                        diag=err.diag)

    return cursor
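

# Hedged usage sketch tying connect_db() and run_sql_statement() together; the database name, table and id value below are illustrative only.
def _demo_run_sql_statement():
    cnx = connect_db(database_name='ambiosensing')
    cursor = cnx.cursor()
    cursor = run_sql_statement(cursor, """SELECT name FROM tenant_assets WHERE id = %s;""", ('some-asset-id',))
    print(cursor.fetchone())
    cursor.close()
    cnx.close()
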
def handleOneWayDeviceRPCRequests(deviceId, remote_method, param_dict=None):
    """POST method to place an one way RPC call to a device identified by the 'deviceId' parameter provided as argument. The RPC command needs to have a valid target in the device (client) side to have any effect. This command should be specified
    in the 'remote_method argument' and any expected arguments required for its execution should be provided in the 'param_dict' argument
    This method is equivalent to a 'fire-and-forget' routine: no replies are expected nor are going to be returned either from the client side (this service doesn't wait for a response msg in the respective topic. This means that this method
    should be use for ACTUATE only. To get any information from the device side use the two way version of this method since all device-side communications are to be initiated from "this" side (the ThingsBoard/application side) and thus a response
    is expected following a request
    @:param deviceId (str) - The typical 32-byte, dash separated hexadecimal string that uniquely identifies the device in the intended ThingsBoard installation
    @:param remote_method (str) - The name of the method that is defined client-side that is to be executed with this call
    @:param param_dict (dict) - If the remote method requires arguments to be executed, use this dictionary argument to provide them
    @:return http_status_code (int) - This method only returns the HTTP status code regarding the execution of the HTTP request and nothing more

    Usage example: Suppose that there's a device configured in the Thingsboard installation that abstracts a lighting switch that can be turned ON or OFF. The device (Raspberry Pi) that controls that switch has a continuously running method (via a
    while True for instance) that listens and reacts to messages received from the ThingsBoard server instance. One of these control methods is called "setLightSwitch" and expects a boolean action "value" passed in the message structure (True or
    False) regarding what to do with the light switch (False = OFF, True = ON). The switch is currently OFF. To turn it ON using this method, use the following calling structure:

    handleOneWayDeviceRPCRequests(deviceId="3806ac00-5411-11ea-aa0c-135d877fb643", remote_method="setLightSwitch", param_dict={"value": True})

    The message that is going to be forwarded to the device as a RPC request is the following JSON structure:
    {
        "method": "setLightSwitch",
        "params":
        {
            "value": true
        }
    }

    The "method" - "params" JSON is somewhat of a format for RPC interactions, with "params" being a complex (multi-level) dictionary to allow for complex, multi-argument remote method executions
    """

    one_way_log = ambi_logger.get_logger(__name__)

    # Validate inputs
    utils.validate_input_type(deviceId, str)
    utils.validate_id(entity_id=deviceId)

    utils.validate_input_type(remote_method, str)

    if param_dict:
        utils.validate_input_type(param_dict, dict)

    # The rest is pretty much more of the same. This one gets the deviceId built in the calling URL
    service_endpoint = '/api/plugins/rpc/oneway/' + deviceId

    # Create the data payload in the dictionary format
    data = {
        "method": str(remote_method),
        "params": param_dict
    }

    service_dict = utils.build_service_calling_info(mac.get_auth_token(user_type="tenant_admin"), service_endpoint=service_endpoint)

    # Done. Set things in motion then
    try:
        response = requests.post(url=service_dict["url"], headers=service_dict["headers"], data=json.dumps(data))
    except (requests.exceptions.ConnectionError, requests.exceptions.ConnectTimeout) as ce:
        error_msg = "Could not get a response from {0}...".format(str(service_dict['url']))
        one_way_log.error(error_msg)
        raise ce

    return response.status_code
def handleTwoWayDeviceRPCRequest(deviceId, remote_method, param_dict=None):
    """POST method to execute bidirectional RPC calls to a specific method and a specific device, all identified through the argument list passed to this method. If the method to execute requires arguments itself, use the param_dict argument to
    provide a dictionary with these arguments, in a key-value scheme (key is the name of the argument with the actual value passed as... value).
    This method implements a request-response dynamic, as opposed to the previous 'fire-and-forget' approach. The server submits an RPC request and then listens for a response with the same requestId in the response topic. The method blocks for a
    while until a valid response can be returned.
    The usage of this method is identical to the previous one.

    @:param deviceId (str) - The typical 32-byte, dash separated hexadecimal string that uniquely identifies the device in the intended ThingsBoard installation.
    @:param remote_method (str) - The name of the method that is defined client-side that is to be executed with this call.
    @:param param_dict (dict) - If the remote method requires arguments to be executed, use this dictionary argument to provide them.
    @:raise utils.InputValidationException - If any of the input fails initial validation
    @:raise utils.ServiceEndpointException - If the request was not properly executed
    @:return response (requests.models.Response) - This method returns the object that comes back from the HTTP request using the 'requests' package. This object has loads of interesting information regarding the original request that created it.
    For this context, the most relevant fields are response.status_code (int), which has the HTTP response code of the last execution, and the response.text, which contains the response of the method that was called, if any. If the remote method
    doesn't return anything, this field comes back empty.
    """
    two_way_log = ambi_logger.get_logger(__name__)

    # Validate inputs
    utils.validate_input_type(deviceId, str)
    utils.validate_id(entity_id=deviceId)

    utils.validate_input_type(remote_method, str)

    if param_dict:
        utils.validate_input_type(param_dict, dict)

    # Set the endpoint
    service_endpoint = '/api/plugins/rpc/twoway/' + deviceId

    # Create the data payload as a dictionary
    data = {
        "method": str(remote_method),
        "params": param_dict
    }

    service_dict = utils.build_service_calling_info(mac.get_auth_token(user_type="tenant_admin"), service_endpoint=service_endpoint)

    # Send the request to the server. The response, if obtained, contains the response data
    try:
        response = requests.post(url=service_dict['url'], headers=service_dict['headers'], data=json.dumps(data))
    except (requests.exceptions.ConnectionError, requests.exceptions.ConnectTimeout) as ce:
        error_msg = "Could not get a response from {0}...".format(str(service_dict['url']))
        two_way_log.error(error_msg)
        raise ce

    # Watch out for HTTP 408. In this project, when that code is returned (server timeout), it normally means that communication with the remote device was not established properly, for whatever reason
    if response.status_code == 408:
        error_msg = "Received an HTTP 408 - Server timed out. Could not get a response from device with id = {0}...".format(str(deviceId))
        two_way_log.error(error_msg)
        raise utils.ServiceEndpointException(message=error_msg)
    elif response.status_code == 200 and response.text == "":
        warn_msg = "Received an HTTP 200 - OK - but no response was returned..."
        two_way_log.warning(warn_msg)
        # Send back the result as a string, because that's what this method returns
        return "200"
    else:
        return response.text
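

# Hedged usage sketch: mirrors the one-way example in the docstring above, but here the return value carries whatever the (illustrative) remote method replied with.
def _demo_two_way_rpc():
    reply = handleTwoWayDeviceRPCRequest(
        deviceId="3806ac00-5411-11ea-aa0c-135d877fb643",
        remote_method="getLightSwitchState")
    print("Device replied: {0}".format(reply))
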
def validate_sql_input_lists(column_list, table_name, trigger_column=False):
    """Since I need to repeat a series of validation steps for several SQL statement building methods that I'm writing, I might as well abstract the whole thing in a method to save precious hours of typing the same thing over and over
    again.
    @:param column_list (list of str) - a list with the names of the database columns involved in the SQL statement that is being built
    @:param table_name (str) - The name of the table where the SQL statement is going to be executed
    @:param trigger_column (str) - An optional parameter given that only the UPDATE and DELETE statements use it (it names the column that goes in the 'WHERE trigger_column = %s' part of the statement)
    @:return True (bool) - if the data is able to pass all validations
    @:raise utils.InputValidationException - If the input arguments are invalid
    @:raise Exception - For any other error types"""
    validate_sql_log = ambi_logger.get_logger(__name__)

    try:
        utils.validate_input_type(column_list, list)
        utils.validate_input_type(table_name, str)
        if trigger_column:
            utils.validate_input_type(trigger_column, str)
        for column in column_list:
            utils.validate_input_type(column, str)
    except utils.InputValidationException as ive:
        validate_sql_log.error(ive.message)
        raise ive

    if len(column_list) <= 0:
        error_msg = "The column list is empty!"
        validate_sql_log.error(error_msg)
        raise PostgresDatabaseException(message=error_msg)

    # If a trigger_column was provided, check if it is among the full list elements
    if trigger_column and trigger_column not in column_list:
        error_msg = "The trigger column provided ({0}) was not found in table {1}".format(
            str(trigger_column), str(table_name))
        validate_sql_log.error(error_msg)
        raise PostgresDatabaseException(message=error_msg)

    # Finally, use the get_table_columns method to return the list of columns for the table in question from the default database and check if all elements in the column list provided are indeed in the list returned from the database
    database_name = user_config.postgres_db_access['database']
    full_column_list = get_table_columns(database_name=database_name,
                                         table_name=table_name)

    for column in column_list:
        if column not in full_column_list:
            error_msg = "The column '{0}' provided in the argument list is not among the columns for {1}.{2}. Cannot continue!".format(
                str(column), str(database_name), str(table_name))
            validate_sql_log.error(error_msg)
            raise PostgresDatabaseException(message=error_msg)
    # All is good with my data. Send back an OK
    return True
def create_delete_sql_statement(database_name, trigger_column, table_name):
    """Method to automatize the building of SQL DELETE statements. These are generally simpler than UPDATE or INSERT ones
    @:param database_name (str) - The name of the database in which this statement is going to be used. Needed for the validation of inputs
    @:param trigger_column (str) - The name of the column that is going to be used in the DELETE statement (the 'WHERE trigger_column = %s' part). As with the UPDATE method, the DELETE statements produced through here are quite
    simple, i.e., the triggering condition is an equality and hence only one record at a time can be deleted via this method
    @:param table_name (str) - The name of the table where the DELETE statement is going to take effect
    @:return sql_delete (str) - The statement string to be executed with '%s' instead of values. These need to be replaced afterwards in the parent function
    @:raise utils.InputValidationException - If the input arguments are invalid"""

    utils.validate_input_type(database_name, str)
    utils.validate_input_type(table_name, str)

    # Though this method doesn't require a full column list (the DELETE statement doesn't require it), it's going to be useful to get it anyway at this point so that I can use my validate_sql_input_lists() method to validate the whole
    # set of arguments in one sitting
    column_list = get_table_columns(database_name, table_name)

    # Got the full column list. I can now run the sql validation method
    validate_sql_input_lists(column_list, table_name, trigger_column)

    # So far so good. Carry on with the statement build
    sql_delete = """DELETE FROM """ + str(table_name) + """ WHERE """ + str(
        trigger_column) + """ = %s;"""

    return sql_delete
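

# Hedged usage sketch: for a table with an 'id' column, the call below would produce "DELETE FROM tenant_assets WHERE id = %s;", with the actual value supplied later through run_sql_statement()'s data tuple. A reachable database is required, since the inputs are validated against the real schema; the names used are illustrative.
def _demo_create_delete_sql_statement():
    sql_delete = create_delete_sql_statement(
        database_name='ambiosensing', trigger_column='id', table_name='tenant_assets')
    print(sql_delete)
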
def gather_latest_data(collection_interval, device_name_list=None):
    """
    This method abstracts the periodic collection of environmental data from all configured devices
    :param collection_interval (datetime.timedelta) - A time window for data collection. Only records with a timestamp between the current datetime and the other end of the time window defined this way are considered
    :param device_name_list (list of str) - Provide a list of device names in this argument if you wish the data update to be limited to them. Leave it as None to update all devices currently in the tb_devices table.
    :raise utils.InputValidationException - If any inputs fail initial validation
    :raise mysql_utils.MySQLDatabaseException - If any problems are encountered when dealing with the database
    """
    utils.validate_input_type(collection_interval, datetime.timedelta)
    if device_name_list is not None:
        utils.validate_input_type(device_name_list, list)
        for device_name in device_name_list:
            utils.validate_input_type(device_name, str)

    mysql_telemetry_controller.populate_device_data_table(collection_time_limit=collection_interval, device_name_list=device_name_list)
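

# Hedged usage sketch: collect the last hour of data for two (illustrative) device names; passing device_name_list=None would update every device currently configured.
def _demo_gather_latest_data():
    one_hour = datetime.timedelta(hours=1)
    gather_latest_data(collection_interval=one_hour, device_name_list=['Thermometer A', 'Thermometer B'])
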
def add_table_data(data_dict, table_name):
    """
    Method that abstracts the insertion of data into the provided database table. The method maps the provided data dictionary to any available column in the table identified in table_name. The method uses the table data as main reference, i.e.,
    it only writes data whose key in the data dictionary has a direct correspondence to a table column in the database. If the data dictionary has more keys/items than available columns, a log warning is issued about it but the method
    carries on, writing all the data that does match.
    @:param data_dict (dict) - A dict structure, i.e., a key-value arrangement with the data to be added/updated into the database table. IMPORTANT: The table columns name were prepared such that there's a one-to-one equivalence between them and
    the expected keys in the data dictionary.
    @:param table_name (str) - The name of the database table where the data dict has to be written into.
    @:raise utils.InputValidationException - If any of the inputs fails initial validation.
    @:raise mysql_utils.MySQLDatabaseException - If any issues occur with the database accesses.
    @:return result (bool) - If the database addition/update was performed successfully, this method returns True. Otherwise, the appropriate exception is raised with the details on why it was raised in the first place.
    """
    log = ambi_logger.get_logger(__name__)

    # Validate inputs
    utils.validate_input_type(data_dict, dict)
    utils.validate_input_type(table_name, str)

    # Prepare the database access objects
    database_name = user_config.access_info['mysql_database']['database']
    cnx = mysql_utils.connect_db(database_name=database_name)
    select_cursor = cnx.cursor(buffered=True)
    change_cursor = cnx.cursor(buffered=True)

    # First, check if the table exists
    sql_select = """SHOW tables FROM """ + str(database_name) + """;"""

    select_cursor = mysql_utils.run_sql_statement(cursor=select_cursor,
                                                  sql_statement=sql_select,
                                                  data_tuple=())

    records = select_cursor.fetchall()

    # The result of the previous fetchall command is a list of one element tuples, hence why I'm wrapping the name whose existence I want to verify in a one element tuple as well. The alternative was to flatten the previous list into a
    # list of plain strings, but it is way simpler this way
    if (table_name, ) not in records:
        error_msg = "The table name provided: {0} doesn't exist yet in database {1}. Cannot continue.".format(
            str(table_name), str(database_name))
        log.error(error_msg)
        raise utils.InputValidationException(message=error_msg)

    # Okay, the table exists in the database. Get all its columns into a list
    column_list = mysql_utils.get_table_columns(database_name=database_name,
                                                table_name=table_name)

    # And create the standard INSERT statement from it
    sql_insert = mysql_utils.create_insert_sql_statement(
        column_list=column_list, table_name=table_name)

    # Build the respective data tuple by going through all column names and checking if there is a corresponding key in the data dictionary
    data_list = []

    for column_name in column_list:
        try:
            # First, try to retrieve a value into the data list by doing a direct retrieval from the data dictionary using the column name as key
            data_list.append(data_dict[column_name])

        # If the current column name doesn't have a matching key in the data dictionary, catch the expected Exception
        except KeyError:
            # And replace the missing value with a None since, by default, all table columns were created in a way where they can hold such a value
            # But warn the user first
            log.warning(
                "Didn't find any '{0}' keys in the data dictionary provided. Setting the {0} column in {1}.{2} to NULL"
                .format(str(column_name), str(database_name),
                        str(table_name)))
            # And set the value then
            data_list.append(None)

    # Done. Proceed with the INSERT
    try:
        change_cursor = mysql_utils.run_sql_statement(
            cursor=change_cursor,
            sql_statement=sql_insert,
            data_tuple=tuple(data_list))

        # Check the outcome of the previous execution. If no columns were changed in the previous statement, raise a 'Duplicate entry' Exception to trigger an UPDATE instead
        if change_cursor.rowcount == 0:
            # No changes to the database table detected. Trigger an UPDATE then
            raise mysql_utils.MySQLDatabaseException(
                message=proj_config.double_record_msg)
        elif change_cursor.rowcount == 1:
            # In this case, all went well. Close the database access objects, commit the changes to the database, inform the user of this and move on
            log.info("Successfully added a new record to {0}.{1}".format(
                str(database_name), str(table_name)))
            cnx.commit()
            select_cursor.close()
            change_cursor.close()
            cnx.close()
            return True

    # Watch out for the typical "Duplicate entry" exception
    except mysql_utils.MySQLDatabaseException as mse:
        if proj_config.double_record_msg in mse.message:
            trigger_column_list = mysql_utils.get_trigger_columns(
                table_name=table_name)

            # Cool. Use this data to get the respective UPDATE statement
            sql_update = mysql_utils.create_update_sql_statement(
                column_list=column_list,
                table_name=table_name,
                trigger_column_list=trigger_column_list)

            # And complete the existing data list by appending to it the values corresponding to the elements in the trigger list
            for trigger_column_name in trigger_column_list:
                try:
                    data_list.append(data_dict[trigger_column_name])
                except KeyError:
                    error_msg = "The value for the trigger column '{0}' cannot be found among the data dictionary elements! Cannot continue!".format(
                        str(trigger_column_name))
                    log.error(error_msg)
                    select_cursor.close()
                    change_cursor.close()
                    cnx.close()
                    raise mysql_utils.MySQLDatabaseException(message=error_msg)

            # Done. Run the UPDATE statement, still looking for duplicate records
            try:
                change_cursor = mysql_utils.run_sql_statement(
                    cursor=change_cursor,
                    sql_statement=sql_update,
                    data_tuple=tuple(data_list))

                # Check the UPDATE execution status
                if change_cursor.rowcount == 0:
                    # If nothing happened, the record already exists in the database. Give a bit of heads up and move on
                    log.warning(
                        "A record with data:\n{0}\n already exists in {1}.{2}. Nothing more to do..."
                        .format(str(data_list), str(database_name),
                                str(table_name)))
                    select_cursor.close()
                    change_cursor.close()
                    cnx.close()
                    return False
                # Else, if more than one record was modified
                if change_cursor.rowcount != 1:
                    error_msg = "Could not execute\n{0}\nin {1}.{2}. Cannot continue...".format(
                        str(change_cursor.statement), str(database_name),
                        str(table_name))
                    log.error(error_msg)
                    select_cursor.close()
                    change_cursor.close()
                    cnx.close()
                    raise mysql_utils.MySQLDatabaseException(message=error_msg)
                else:
                    info_msg = "Updated record with  successfully in {0}.{1}".format(
                        str(database_name), str(table_name))
                    log.info(info_msg)
                    cnx.commit()
                    select_cursor.close()
                    change_cursor.close()
                    cnx.close()
                    return True
            except mysql_utils.MySQLDatabaseException as mse:
                # If a duplicate result was still found with the last UPDATE execution
                if proj_config.double_record_msg in mse.message:
                    # Inform the user with a warning message
                    warn_msg = "The record with "
                    for i in range(0, len(trigger_column_list) - 1):
                        warn_msg += str(trigger_column_list[i]) + " = " + str(
                            data_dict[trigger_column_list[i]]) + ", "

                    warn_msg += str(trigger_column_list[-1]) + " = " + str(data_dict[trigger_column_list[-1]]) + " already exists in {0}.{1}. Nothing to do then..." \
                        .format(str(database_name), str(table_name))

                    log.warning(warn_msg)

                    # Close out all database access objects
                    select_cursor.close()
                    change_cursor.close()
                    cnx.close()

                    # Nothing else that can be done here. Move out
                    return True
                else:
                    # Some other Exception was raised then
                    select_cursor.close()
                    change_cursor.close()
                    cnx.close()

                    # Forward the Exception then
                    raise mse
        else:
            select_cursor.close()
            change_cursor.close()
            cnx.close()
            # Something else must have happened then. Keep on raising the Exception
            raise mse
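

# Hedged usage sketch: dictionary keys that match table columns are written, missing columns fall back to NULL, and duplicate records trigger the UPDATE path. All values below are illustrative.
def _demo_add_table_data():
    sample_record = {
        'id': '3806ac00-5411-11ea-aa0c-135d877fb643',
        'name': 'Thermometer A',
    }
    if add_table_data(data_dict=sample_record, table_name='tenant_assets'):
        print("Record inserted/updated successfully")
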
def update_devices_table(customer_name=False):
    """The logic behind this module is quite similar to the one employed in the update_tenant_table(): it gets a similar data structure in (with all the same annoying problems), has to do the same kind of processing and so on. As with the other
    method, I'm going to write a insert and an update methods that can call each other depending on the context: both methods detect what is going on in the database and then act accordingly.
    @:param customer_name (str) - OPTIONAL parameter. There are essentially multiple ways to retrieve device dictionary data from the remote API. So far, I've created support for retrieving device data from the getTenantsDevices method,
    using just tenants data, and the getCustomerDevices, using customer data instead. Overall, the data returned comes in the same format in both cases, hence why I want to use just one method to process the device data. The difference is,
    using tenant data only retrieves devices that are associated to a tenant, as well as using customer data only returns devices associated to a customer. So, ideally, I should use both methods and merge the resulting list before running the
    table_updater method. The problem is that the customer based method requires a customer_id that is retrieved using a more memorable customer_name (and also because I've implemented a more flexible way to retrieve this data when the
    customer_name is not completely known), so I can only get the additional result sets if the customer_name is passed on to this method. So, if this argument is omitted, this method uses just the tenant data. If not, both data sets are retrieved.
    @:raise utils.InputValidationException - If the inputs fail validation
    @:raise Exception - If other errors occur.
    """
    module_table_key = 'devices'
    # Use the same limit value for both calls
    # limit = 50
    pageSize = 100
    page = 0
    update_devices_log = ambi_logger.get_logger(__name__)  # __name__ holds the import path of the current module, which makes for a unique logger name

    # Get the base response using just tenant data
    # tenant_response = tb_device_controller.getTenantDevices(limit=limit)
    tenant_response = tb_device_controller.getTenantDevices(pageSize=pageSize, page=page)

    # Translate the stuff that comes from the ThingsBoard API as PostGres-speak to Python-speak before forwarding the data
    tenant_response_dict = eval(utils.translate_postgres_to_python(tenant_response.text))  # Converts the response.text into a dictionary (eval is a python native method :D )

    # Test if all results came back with the current limit setting
    if tenant_response_dict['hasNext']:
        update_devices_log.warning("Not all results from the remote API were returned on the last call using tenant data (pageSize = {0}). Raise the pageSize parameter or iterate the page number to retrieve more of them.".format(str(pageSize)))

    # Extract the device data to a list
    tenant_device_list = tenant_response_dict['data']

    customer_device_list = None
    # Check if its possible to use the customer data too to retrieve customer associated devices
    if customer_name:
        # Validate it first
        try:
            utils.validate_input_type(customer_name, str)
        except utils.InputValidationException as ive:
            update_devices_log.error(ive.message)
            raise ive

        # Input validated. Proceed to query the API using the same pagination parameters (assuming getCustomerDevices was migrated to the same pageSize/page signature as getTenantDevices)
        customer_response = tb_device_controller.getCustomerDevices(customer_name=customer_name, pageSize=pageSize, page=page)

        # Translate it to Python and cast the response to a dictionary
        customer_response_dict = eval(utils.translate_postgres_to_python(customer_response.text))

        # Test if the customer bound results were truncated by the limit value
        if customer_response_dict['hasNext']:
            update_devices_log.warning("Not all results from the remote API were returned on the last call using customer data (pageSize = {0}). Raise the pageSize parameter or iterate the page number to retrieve more of them.".format(str(pageSize)))

        # Extract the records into a list
        customer_device_list = customer_response_dict['data']

    # And then try to add them, one by one, to the database table. The lazy way to do this is to add records indiscriminately and let the database, along with the database_table_updater module that is called to do just that, sort through possible
    # repeated records (devices can be associated to customers and to tenants simultaneously) with their internal tools. But fortunately both device data retrieval methods use the exact same data structure to format their results, hence a quick
    # comparison is enough to collate a list with only one record per device in it
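    # (Hedged aside: the membership test below does a full dictionary comparison per customer device,
    # which is O(n*m) over the two lists. Since dictionaries are not hashable, a set would not work
    # directly here; if these lists ever grow large, indexing the tenant devices by device['id']['id']
    # in a plain dict would give the same dedup with O(1) lookups.)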

    # Start by picking the tenant device list as the default
    device_list = tenant_device_list

    # If another list was retrieved from the customer data too
    if customer_device_list:
        # Go through all the customer associated devices
        for customer_device in customer_device_list:
            # And if a record is deemed repeated
            if customer_device in device_list:
                # Ignore it and move to the next one in line
                continue
            # Otherwise
            else:
                # Add it to the main list
                device_list.append(customer_device)

    # The rest of the code should work with either just a tenant based device list or one that also includes customer based devices
    for device in device_list:
        # Unlike the tenant processing method, the device data has a couple of redundant fields that I decided to remove for the sake of simplicity. Namely, the result dictionary for each device entry returns two keys, tenantId and customerId, which
        # are sub-dictionaries with the format {'entityType': string, 'id': string}. I'm only interested in the id field (because I can use it later to do JOIN statements with the id field as the main correlation). The entityType value associated
        # with those cases is 'TENANT' or 'CUSTOMER', which is a bit redundant given that it is already implicit in the parent key. As such, I decided to create database columns named tenantId and customerId respectively, but set to the VARCHAR type
        # to store just the id string. So, for this to work later on, I need to replace these sub-dictionaries with just the id strings. Otherwise the list of values is not going to match the number of database columns
        device['tenantId'] = device['tenantId']['id']
        device['customerId'] = device['customerId']['id']

        # I still have one more customization to do in this service. Subsequent calls for device data from the ThingsBoard remote API require 5 specific and mandatory elements: the entityType, entityId, timeseriesKey,
        # startTimestamp and endTimestamp. The first 2 are covered by the device_controller.getTenantDevices method and the last 2 are set by the user (not method dependent). So I'm only missing the timeseriesKey at this point to be able to do
        # bulk requests for device data. To obtain that, I need to place a specific call to a remote API service, namely the telemetry_controller.getTimeseriesKeys method. This method requires the device's entityType and entityId that were just
        # returned from the previous API call. This method, if correctly called, returns the name(s) that the PostGres database on the API side is using to store the device data. It is not optimal to create a separate table with
        # just a column with this data (or even with two additional entityType and entityId columns) when I can simply add a new column to the existing thingsboard_devices_table and place a call at this point to the other API service that returns
        # just that and concatenate it to the existing data dictionary. The thingsboard_devices_table already has an 'extra' column named timeseriesKey at the end to include this element, so now it's just a matter of putting it into the dictionary
        # to return. Hmm... it seems that there are sensors that can provide readings from multiple sources (the device can be a multi-sensor array that uses a single interface to communicate).
        # The return from the next statement is always a list with as many elements as the number of timeseriesKeys supported by the identified device (one per supported reading/sensor)
        timeseries_keys = tb_telemetry_controller.getTimeseriesKeys(device['id']['entityType'], device['id']['id'])

        # The database entry needs to contain all the elements returned in the timeseries_keys list
        # Add an extra entry to the device dictionary to be used in the database population operation. The str.join() operation is going to concatenate all elements in timeseries_keys, using a comma to separate them, into a single string.
        # Also, this approach has the advantage that if an API request is built from this data (retrieved from the database of course), this format allows for a direct call - no post processing required at all. This is because of how these types
        # of remote API requests are constructed: the endpoint request takes a query for multiple timeseries keys from one single device as comma-separated strings with no spaces.
        # For example, querying for the timeseries values from a device with 4 sensors attached that can produce 4 types of different readings implies the following endpoint to be sent in a URL to the remote service:
        # http://localhost:8080/api/plugins/telemetry/DEVICE/3f0e8760-3874-11ea-8da5-2fbefd4cb87e/values/timeseries?limit=3&agg=NONE&keys=humidity,temperature,pressure,lux&startTs=1579189110790&endTs=1579193100786
        # The 'keys' part of the last string shows how this request must be constructed and that implies all parameters in a single string, separated by commas and without any spaces in between.
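        # A quick sketch of what the join below produces (keys borrowed from the example URL above):
        #   ",".join(['humidity', 'temperature', 'pressure', 'lux']) -> 'humidity,temperature,pressure,lux'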
        device['timeseriesKeys'] = ",".join(timeseries_keys) if hasattr(timeseries_keys, "__iter__") else ""
        # Done. Carry on with the database stuff

        # Same old, same old. Expand the device dictionary to a single level one first
        device = utils.extract_all_key_value_pairs_from_dictionary(input_dictionary=device)

        # And replace any annoying POSIX timestamps for datetime.datetime objects
        try:
            device['createdTime'] = mysql_utils.convert_timestamp_tb_to_datetime(timestamp=device['createdTime'])
        except KeyError:
            pass

        database_table_updater.add_table_data(device, proj_config.mysql_db_tables[module_table_key])
def getLatestTimeseries(device_name, timeseries_keys_filter=None):
    """
    This method is analogous to the previous one, i.e., it also retrieves Timeseries data that is associated to the device identified by 'device_name', but in this particular case only one timestamp/value pair is returned for each of the device's
    measurements, namely the last one recorded by the Thingsboard installation that oversees that device. This method is very useful to:
    1. Determine if a device is working by retrieving the last recorded data.
    2. Determine the timeseries keys associated to the device, as well as the last timestamp associated to them.
    3. Determine the most recent end_date possible for that device in a direct way - once this parameter is known, a more complete and insightful getTimeseries call can then be placed.
    @:param device_name (str) - The name of the device whose latest associated timeseries should be retrieved by this method.
    @:param timeseries_keys_filter (list of str) - A list with the names of the timeseries keys to be returned by the remote API. If a valid list is provided, only data for the keys specified in this list are going to be returned. This method
    validates this list against any associated timeseriesKeys for the device: mismatched elements from this list are to be ignored.
    @:raise utils.InputValidationException - If any of the inputs fails initial validation
    @:raise utils.ServiceEndpointException - If errors occur when invoking any remote API services
    @:raise mysql_utils.MySQLDatabaseException - If errors occur when accessing the database
    @:return None if the API returns an empty set, otherwise returns a dictionary with the following format:
    device_data =
    {
        "timeseriesKey_1": [
            {
              "ts": int,
              "value": str
            }
        ],
        "timeseriesKey_2": [
            {
              "ts": int,
              "value": str
            }
        ],
        ...
        "timeseriesKey_N": [
            {
                "ts": int,
                "value": str
            }
        ]
    }
    """
    log = ambi_logger.get_logger(__name__)

    utils.validate_input_type(device_name, str)
    if timeseries_keys_filter:
        utils.validate_input_type(timeseries_keys_filter, list)

        for ts_key in timeseries_keys_filter:
            utils.validate_input_type(ts_key, str)

    # Grab the device credentials at this point. If the method returns anything (not None), then assume that it's the following tuple: (entityType, entityId, timeseriesKeys_list)
    device_cred = mysql_device_controller.get_device_credentials(
        device_name=device_name)

    if device_cred is None:
        error_msg = "Could not get valid credentials for device '{0}'. Cannot continue...".format(
            str(device_name))
        log.error(error_msg)
        raise mysql_utils.MySQLDatabaseException(message=error_msg)

    # Validate the timeseries keys, if any were provided
    keys = None
    if timeseries_keys_filter:
        valid_keys = []
        for ts_filter_key in timeseries_keys_filter:
            # Filter out only the valid keys, i.e., the ones with a correspondence in the list returned from the database
            if ts_filter_key in device_cred[2]:
                valid_keys.append(ts_filter_key)

        # Check if at least one of the proposed keys made it to the valid list. If not, default to the list returned from the database (if this one is also not empty)
        if not len(valid_keys):
            log.warning(
                "Could not validate any of the filter keys ({0}) provided as argument!"
                .format(str(timeseries_keys_filter)))

            # Check if the timeseriesKeys element returned from the device credentials request was a single element list with an empty string inside it
            if len(device_cred[2]) == 1 and device_cred[2][0] == "":
                log.warning(
                    "The database didn't return any valid set of timeseriesKeys. Omitting this argument in the API call"
                )
                # Don't do anything else. The 'keys' parameter is already None. Keep it as that then
        else:
            keys = ",".join(valid_keys)

    # I have all I need to execute the remote API call
    service_endpoint = "/api/plugins/telemetry/{0}/{1}/values/timeseries".format(
        str(device_cred[0]), str(device_cred[1]))

    # Add the keys filter, if it was provided
    if keys is not None:
        service_endpoint += "?keys={0}".format(str(keys))

    # Service endpoint is done. Grab the service calling dictionary
    service_dict = utils.build_service_calling_info(
        auth_token=mac.get_auth_token(user_type='tenant_admin'),
        service_endpoint=service_endpoint)

    # Execute the remote call finally
    try:
        response = requests.get(url=service_dict['url'],
                                headers=service_dict['headers'])
    except (requests.exceptions.ConnectionError,
            requests.exceptions.ConnectTimeout):
        error_msg = "Unable to establish a connection with {0}...".format(
            str(service_dict['url']))
        log.error(error_msg)
        raise utils.ServiceEndpointException(message=error_msg)

    # Check the HTTP status code in the response
    if response.status_code != 200:
        error_msg = "Request unsuccessful: Received HTTP {0} with message {1}!".format(
            str(response.status_code), str(eval(response.text)['message']))
        log.error(error_msg)
        raise utils.ServiceEndpointException(message=error_msg)
    else:
        # Send back the response already in dictionary form
        return eval(response.text)
def getAttributes(entityType=None, entityId=None, deviceName=None, keys=None):
    """
    GET method to retrieve all server-type attributes configured for the device identified by either the pair
    entityType/entityId or the deviceName provided. One of these two identification routes is required for this
    method to execute. If insufficient data is provided, the relevant Exception shall be raised.
    :param entityType (str) - The entity type of the object whose attributes are to be retrieved.
    :param entityId (str) - The id string that identifies the device whose attributes are to be retrieved.
    :param deviceName (str) - The name of the device that can be used to retrieve the entityType/entityId.
    :param keys (list of str) - Each attribute returned is a key-value pair.
    Use this argument to provide a key based filter, i.e., if this list is set,
    only attributes whose keys match any of the list elements are to be returned.
    :raise utils.InputValidationException - If any of the inputs has the wrong data type or the method doesn't have the necessary data to execute.
    :raise utils.ServiceEndpointException - If problems occur when accessing the remote API
    :return attribute_dictionary (dict) - A dictionary with the retrieved attributes in the following format:
        attribute_dictionary =
        {
            'attribute_1_key': 'attribute_1_value',
            'attribute_2_key': 'attribute_2_value',
            ...
            'attribute_N_key': 'attribute_N_value'
        }
        where the keys in the dictionary are the device-side attribute keys (the timeseries keys being measured by the device)
        and the respective values are the ontology-specific names (official names) that those keys map straight into.
        If the device identified by the argument data does exist but doesn't have any attributes configured,
        this method returns None instead.
    """
    log = ambi_logger.get_logger(__name__)

    # Validate inputs
    if entityId:
        utils.validate_id(entity_id=entityId)

        # The entityId seems OK, but it's useless unless the entityType, or at least the deviceName, was also provided; without one of those the method cannot continue
        if not entityType and not deviceName:
            error_msg = "A valid entityId was provided but no entityType nor deviceName were added. Cannot execute this method until a valid entityType/entityId or a valid deviceName is provided!"
            log.error(error_msg)
            raise utils.InputValidationException(message=error_msg)

    if entityType:
        utils.validate_entity_type(entity_type=entityType)

        # Again, as before, the method can only move forward if a corresponding entityId or deviceName was also provided
        if not entityId and not deviceName:
            error_msg = "A valid entityType was provided but no corresponding entityId nor deviceName. Cannot continue until a valid entityType/entityId or a valid deviceName is provided!"
            log.error(error_msg)
            raise utils.InputValidationException(message=error_msg)

    if deviceName:
        utils.validate_input_type(deviceName, str)

    if keys:
        utils.validate_input_type(keys, list)
        for key in keys:
            utils.validate_input_type(key, str)

    # If the code got to this point, I either have a valid entityId/entityType pair or a deviceName. Check if only the deviceName was provided and retrieve the entityId/entityType from it
    if deviceName and (not entityType or not entityId):
        # Get the entityId and entityType from the deviceName provided
        device_data = mysql_device_controller.get_device_credentials(
            device_name=deviceName)

        # Check if the previous statement returned an empty (None) result. If that is the case, either the device is not (yet) configured in the device table or the table needs to be updated
        if not device_data:
            error_msg = "Cannot retrieve a pair of entityId/entityType from the device name provided: {0}. Either:" \
                        "\n1. The device is not yet configured in the database/ThingsBoard platform" \
                        "\n2. The MySQL device table needs to be updated." \
                        "\nCannot continue for now".format(str(deviceName))
            log.error(error_msg)
            raise utils.InputValidationException(message=error_msg)

        # The previous method returns a 3-element tuple in the format (entityType, entityId, timeseriesKeys). Grab the relevant data straight from it
        entityType = device_data[0]
        entityId = device_data[1]

    # Validation complete. I have all I need to execute the remote call
    service_endpoint = "/api/plugins/telemetry/{0}/{1}/values/attributes".format(
        str(entityType), str(entityId))

    # If a list of keys was provided, concatenate them to the current endpoint
    if keys:
        service_endpoint += "?keys="

        # Add all the keys to the endpoint concatenated in a single, comma separated (without any spaces in between) string
        service_endpoint += ",".join(keys)

    # Build the service dictionary from the endpoint already built
    service_dict = utils.build_service_calling_info(
        mac.get_auth_token(user_type='tenant_admin'),
        service_endpoint=service_endpoint)

    # Query the remote API
    try:
        response = requests.get(url=service_dict['url'],
                                headers=service_dict['headers'])
    except (requests.exceptions.ConnectionError,
            requests.exceptions.ConnectTimeout) as ce:
        error_msg = "Could not get a response from {0}...".format(
            str(service_dict['url']))
        log.error(error_msg)
        raise utils.ServiceEndpointException(message=ce)

    # If a response was returned, check the HTTP return code
    if response.status_code != 200:
        error_msg = "Request not successful: Received an HTTP " + str(
            eval(response.text)['status']) + " with message: " + str(
                eval(response.text)['message'])
        log.error(error_msg)
        raise utils.ServiceEndpointException(message=error_msg)
    else:
        # Got a valid result. Format the returned objects for return
        data_to_return = eval(utils.translate_postgres_to_python(
            response.text))

        if len(data_to_return) == 0:
            # Nothing to return then. Send back a None instead
            return None

        # If the request was alright, I've received the following Response Body (after eval)
        # data_to_return =
        # [
        #   {
        #       "lastUpdateTs": int,
        #       "key": str,
        #       "value": str
        #   },
        # ...
        #   {
        #       "lastUpdateTs": int,
        #       "key": str,
        #       "value": str
        #   }
        # ]
        #
        # So I need to transform this into the return structure defined above

        attribute_dictionary = {}
        for attribute_pair in data_to_return:
            # Use this opportunity to filter out any attribute returned that is not part of the measurement list desired
            if attribute_pair['value'] not in proj_config.ontology_names:
                # If the attribute value is not one in the 'official list of names', skip it
                continue
            # Create the entries defined in the man entry of this method from the list elements returned from the remote API
            attribute_dictionary[
                attribute_pair['key']] = attribute_pair['value']

        # All done. Return the attributes dictionary
        return attribute_dictionary
def getTimeseriesKeys(entityType, entityId):
    """This method executes the GET request that returns the name (the ThingsBoard PostGres database key associated to the Timeseries table) of the variable whose quantity is being produced by the element identified by the pair (entityType,
    entityId). This method is limited to 'DEVICE' type elements (it really doesn't make sense for any other type and that's why I should validate this against the allowed entityTypes).
    @:type user_types allowed for this service: TENANT_ADMIN, CUSTOMER_USER
    @:param entityType (str) - One of the elements in the config.thingsboard_supported_entityTypes dictionary, though for this particular method only 'DEVICE' type elements are allowed (the remote API returns an empty set otherwise)
    @:param entityId (str) - The associated id string. The expected format is 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx', where x is a hexadecimal character.
    @:return result (list of str) - A list with the names of the quantities being measured by the device identified by the input arguments (e.g., 'temperature', 'water consumption', etc.), one element per supported timeseries key, or None if the device has no timeseries keys associated yet
    @:raise utils.InputValidationException - If any of the inputs fails validation
    """

    timeseries_key_log = ambi_logger.get_logger(__name__)
    # Input validation
    try:
        utils.validate_input_type(entityType, str)
        utils.validate_input_type(entityId, str)
    except utils.InputValidationException as ive:
        timeseries_key_log.error(ive.message)
        raise ive

    # For this case, I'm not even bothering to check if the entityType is one of the allowed ones (config.thingsboard_supported_entityTypes) - This method only makes sense if the entityType is a DEVICE (including being all caps)
    error_msg = None
    expected_entity_type = 'DEVICE'

    if entityType.upper() != expected_entity_type:
        # Eliminate any potential issues with non-upper case characters passed in the entityType in one fell swoop
        error_msg = "The entityType provided is not {0}. This method is restricted to this entity Type!".format(
            str(expected_entity_type))
    # Validate the entityId string: check the format and if its characters are indeed all hexadecimal
    # The id strings are very strict regarding their format. I can either place bogus calls to the remote API server and catch the HTTP 500 response or I can use Python to build a more intuitive and helpful validation logic for the desired format,
    # since this information was only obtained by direct observation of the field itself - the id field is 36 bytes long (32 bytes of data + 4 bytes for the '-' separators), with the '-' characters at positions 8, 13, 18 and 23.
    elif len(entityId) != 36:
        error_msg = "The entityId string provided has the wrong format: Wrong number of characters ({0}). Please provide a 36 character string.".format(
            str(len(entityId)))
    elif entityId[8] != '-' or entityId[13] != '-' or entityId[
            18] != '-' or entityId[23] != '-':
        error_msg = "The entityId doesn't have the expected format (expect a '-' at character positions 8, 13, 18 and 23): {0}".format(
            str(entityId))
        # Bail out of this chain as soon as an error is detected
    else:
        # Start by getting all the hexadecimal blocks in a nice list and removing the '-' character in the process
        segments = entityId.split('-')
        # The easiest way to test if a given string only has hexadecimal characters in it is to try to cast it to an integer forcing a base 16 in the process, i.e., doing a decoding operation assuming a base 16 reference. If any of the characters
        # in any of those segments is not hexadecimal (0-F), the operation throws a ValueError Exception that I can catch
        for segment in segments:
            try:
                int(segment, 16)
            except ValueError:
                error_msg = "One of the entityId string has non-hexadecimal characters in it: {0}. Please provide a 36 long, hexadecimal string for this field.".format(
                    str(segment))
                # End this loop as soon something wrong is detected
                break

    if error_msg:
        timeseries_key_log.error(error_msg)
        raise utils.InputValidationException(message=error_msg)
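    # (Hedged aside: Python's standard uuid module can do this whole format check in one line -
    # uuid.UUID(entityId) raises a ValueError for malformed id strings - but the manual checks
    # above produce more specific error messages than that single exception would.)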

    # Validation done. Lets get data from the remote API then
    service_endpoint = "/api/plugins/telemetry/"

    service_endpoint += "{0}/{1}/keys/timeseries".format(
        str(expected_entity_type), str(entityId))
    # NOTE: This particular service requires a REGULAR (non sys_admin) type authorization token, hence the user_type='tenant_admin' in the call below
    service_dict = utils.build_service_calling_info(
        mac.get_auth_token(user_type='tenant_admin'), service_endpoint)

    try:
        response = requests.get(url=service_dict['url'],
                                headers=service_dict['headers'])
    except (requests.exceptions.ConnectionError,
            requests.exceptions.ConnectTimeout) as ce:
        error_msg = "Could not get a response from {0}...".format(
            str(service_dict['url']))
        timeseries_key_log.error(error_msg)
        raise ce

    # Test the HTTP status code in the response (I'm only continuing if it is a 200 and, in this particular case, a single str element was returned)
    if response.status_code != 200:
        error_msg = "Request unsuccessful: Received an HTTP {0} with message: {1}".format(
            str(eval(response.text)['status']),
            str(eval(response.text)['message']))
        timeseries_key_log.error(error_msg)
        raise utils.ServiceEndpointException(message=error_msg)
    else:
        # The objective here is to return the string with the key that I need to call another endpoint service to get actual data from the device. If the data is correct and there's a key associated to that, I should get a single list with a
        # string as its only element back. If the device exists but it still doesn't have a timeSeries associated to it, I would get an empty list back
        # Start by casting the response.text to a list
        result = eval(response.text)

        try:
            # Raise an Exception if the data type obtained is different from the expected
            utils.validate_input_type(result, list)
        except utils.InputValidationException as ive:
            timeseries_key_log.error(ive.message)
            raise ive

        # In some cases I may have a multi-sensor device (which is actually the case in our prototype setting), which means that this result may be an array of all the supported timeseries keys. In this case, the return needs to be processed that way
        # Check if, at least, one result was returned
        if not len(result):
            # Warn the user first
            timeseries_key_log.warning(
                "There are no keys associated to {0} with id {1} yet!".format(
                    str(entityType), str(entityId)))
            # Return a result that is passable to be used by another calling method
            return None
        else:
            # If all goes well, return the expected value. NOTE: This method returns these results as is, i.e., just like they are returned from the remote API, which means that 'result' is going to be a list that potentially may have multiple
            # elements (if the DEVICE identified by the ID has multiple sensors measuring different variables at once). The method that consumes these results should take heed on this
            return result
def getTimeseries(device_name,
                  end_date,
                  start_date=None,
                  time_interval=None,
                  interval=None,
                  limit=100,
                  agg=None,
                  timeseries_keys_filter=None):
    """This method is the real deal, at least to establish a base methodology to retrieve hard data from the remote API server. Unlike other API based methods so far, this one requires some data to be present in the MySQL server already because
    that is where the actual method call input data is going to come from. The remote API service that retrieves the requested data requires 5 mandatory elements (the optional arguments are explicit in the calling signature of this method where
    they are set to their default values already, in case that down the line there is a need to use them): entityType, entityId, keys, startTs and endTs. The first 3 parameters are going to be retrieved with a call to the MySQL
    thingsboard_devices_table and the timestamp ones are going to be determined from the triplet end_date (mandatory), start_date and time_interval (only one of the latter two is required). The method returns a dictionary with a list of timestamp/
    value pairs per timeseries key, which may or may not be clipped by the limit value
    @:type user_types allowed for this service: TENANT_ADMIN, CUSTOMER_USER
    @:param device_name (str) - The name of the device to retrieve data from (e.g., 'Thermometer A-1', 'Water Meter A-1', etc... whatever the string used when registering the device in the ThingsBoard system). This value is certainly easier to
    retain and/or memorize for the user than the id string, for instance.
    @:param end_date (datetime.datetime) - A datetime.datetime object, i.e., in the format YYYY-MM-DD hh:mm:ss but that belongs to the datetime.datetime class. This is the latest value of the interval and, to avoid invalid dates into the input (
    like future dates and such) this one is mandatory. The interval to be considered is going to be defined by either start_time (earliest) -> end_time (latest) or end_time - time_interval (in seconds) -> end_time, but one of the next two input
    arguments has to be provided.
    @:param start_date (datetime.datetime) - A datetime.datetime object delimiting the earliest point of the time interval for data retrieval
    @:param time_interval (int) - An interval, in seconds, to be subtracted to the end_time datetime object in order to define the time window to return data from
    @:param interval (int) - This is an OPTIONAL API side only parameter whose use still eludes me... so far I've tried to place calls to the remote service with all sorts of values in this field and I'm still to discover any influence of it in
    the returned results. NOTE: My initial assumption was it to be able to be set as a API side version of my time_interval. Yet, that is not the case because the API requires both the end and start timestamps to be provided by default.
    @:param limit (int) - The number of results to return in the request. Device data can be quite a lot to process and that's why this parameter, though optional, is set to 100 by default. Two things with this value: though the API doesn't
    explicitly say so, it doesn't like limit <= 0. It doesn't return an error per se; instead the service hangs until eventually an HTTP 503 - Service Unavailable is thrown. As such, I'm going to validate this input accordingly.
    Also, unlike other limit parameters so far, there's no indication in the response structure if the number of results returned were limited by this value or by the time interval defined. To provide the user with more helpful information in this
    regard, this method is going to count the number of returned results and, if they do match the limit value provided, warn the user about it.
    @:param agg (str) - No idea what this one does either... The API testing interface has it set to NONE by default, though it is an optional parameter whose effect on the returned results is still yet to be understood. All I know so far is that the
    remote API expects a string in it
    @:param timeseries_keys_filter (list of str) - A list of strings with the keys to be returned from the remote API. Some devices contain multiple sensors, which means that there are going to be multiple records from different variables under
    the same device ID. To limit the returned results to a subset of all parameters, provide a list in this argument with the correct names to limit the entries to be returned. Omitting this parameter (which defaults to None) returns all
    timeseries keys under the provided device ID
    @:return result_dict (dict) - The returned results are processed and returned as a dictionary keyed by timeseries key, where each value is a list of {'ts': int, 'value': str} pairs (the exact structure is detailed further down in this method).
    @:raise utils.InputValidationException - If any of the inputs provided fails validation
    @:raise utils.ServiceEndpointException - If something goes wrong with any of the external service calls to the remote API executed in the method
    @:raise mysql_utils.MySQLDatabaseException - For errors derived from the MySQL database accesses
    @:raise Exception - For any other detected errors during the method's execution
    """
    timeseries_log = ambi_logger.get_logger(__name__)

    # Before moving forward, check if at least one of the start_date, time_interval inputs was provided. NOTE: If both inputs are present, i.e., not None, the method validates both and, if both are valid, it prioritizes start_date over
    # time_interval. If one of them happens to be invalid, the method execution is not stopped but the user gets warned (through the logger) about this and how the method is going to operate. But at this stage, I'm only moving forward if I
    # have the conditions to set up a valid time window for the API request
    if not start_date and not time_interval:
        error_msg = "Please provide at least one valid start_time (datetime.datetime) or a time_interval (int). Cannot compute a time window for data retrieval otherwise.."
        timeseries_log.error(error_msg)
        raise utils.InputValidationException(message=error_msg)

    # Time to validate the inputs
    utils.validate_input_type(device_name, str)
    utils.validate_input_type(end_date, datetime.datetime)
    # Limit is OPTIONAL but, because of what is explained in this method's man entry, I need this value to be sort of mandatory. This verification, given that the input is already set with a decent default value, is just to protect the
    # method's execution against a user setting it to None for whatever reason
    utils.validate_input_type(limit, int)
    if start_date:
        utils.validate_input_type(start_date, datetime.datetime)
    if time_interval:
        utils.validate_input_type(time_interval, int)
    if interval:
        utils.validate_input_type(interval, int)
    if agg:
        utils.validate_input_type(agg, str)

    # Validate the argument against the list type
    if timeseries_keys_filter:
        utils.validate_input_type(timeseries_keys_filter, list)

        if len(timeseries_keys_filter) <= 0:
            timeseries_log.warning(
                "Invalid timeseries keys filter provided: empty list. This filter is going to be ignored"
            )
            timeseries_keys_filter = None
        else:
            # And each of its elements against the expected str type
            for timeseries in timeseries_keys_filter:
                utils.validate_input_type(timeseries, str)

    # Data type validation done. Now for the functional validations
    error_msg = None
    if limit <= 0:
        error_msg = "Invalid limit value: {0}. Please provide a greater than zero integer for this argument.".format(
            str(limit))
    elif end_date > datetime.datetime.now():
        error_msg = "Invalid end_date provided: {0}! That date hasn't happened yet (future date). Please provide a valid datetime value!".format(
            str(end_date))
    elif start_date and not time_interval and start_date >= end_date:
        error_msg = "Invalid start_date! The start_date provided ({0}) is at or after the end_date ({1}): invalid time window defined!".format(
            str(start_date), str(end_date))
    elif time_interval and not end_date and time_interval <= 0:
        error_msg = "Invalid time interval ({0})! Please provide a greater than zero value for this argument (the number of seconds to subtract from end_date).".format(
            str(time_interval))
    elif start_date and time_interval and start_date >= end_date and time_interval <= 0:
        error_msg = "Both the start_date and time_interval arguments provided are invalid!\nThe start_date provided ({0}) is newer than the end_date indicated ({1}) and the time_interval has an invalid value ({2}).\n" \
                    "Please provide a valid (older) start_date or a valid (greater than 0) time_interval".format(str(start_date), str(end_date), str(time_interval))

    if error_msg:
        timeseries_log.error(error_msg)
        raise utils.InputValidationException(message=error_msg)

    # And now for the cases where both a valid start_date and a valid time_interval were provided. The previous validation bundle made sure that, if only one of these two parameters was provided, it was valid. If I got to this point I can have both
    # of these parameters set to valid inputs, but I need to warn the user that I'm only going to use one to define the time window
    if start_date and time_interval:
        timeseries_log.warning(
            "Both the start_date and time_interval arguments provided are valid but only start_date is going to be considered moving on. Set this argument to None/Invalid to use the time_interval instead"
        )
        # So, if I'm dropping the time_interval, I need to signal this somehow moving forward:
        time_interval = None

    # Retrieve the device's credentials using the appropriate method
    device_cred = mysql_device_controller.get_device_credentials(
        device_name=device_name)

    # Check if a valid set of credentials was found
    if device_cred is None:
        error_msg = "Unable to retrieve a set of valid credentials to device '{0}'".format(
            str(device_name))
        timeseries_log.error(error_msg)
        raise mysql_utils.MySQLDatabaseException(message=error_msg)

    # The first 3 elements that I need to build the service endpoint are valid and retrieved. Let's deal with the time window then. The service endpoint requires the limits of this window (startTs, endTs) to be passed in that weird POSIX
    # timestamp-like format that the ThingsBoard PostGres database adopted, i.e., a 13 digit number with no decimal point (10 digits for the integer part + 3 for the milliseconds value... but with the decimal point omitted...). Fortunately I've
    # written the 'translate' functions for this situation already
    end_ts = mysql_utils.convert_datetime_to_timestamp_tb(end_date)

    # If the other end is defined by the start_date datetime.datetime object
    if start_date:
        # Easy
        start_ts = mysql_utils.convert_datetime_to_timestamp_tb(start_date)

    # If I got to this point in the code, given the brutality of validations undertaken so far, I can only get here with start_date = None and something valid in time_interval. Proceed accordingly
    else:
        # I need to convert this interval to a timedelta object to be able to subtract it to the end_time one
        time_interval = datetime.timedelta(seconds=int(time_interval))
        start_time = end_date - time_interval
        start_ts = mysql_utils.convert_datetime_to_timestamp_tb(start_time)
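    # (Illustrative, assuming convert_datetime_to_timestamp_tb multiplies the POSIX timestamp by 1000
    # and drops the decimal point, timezone details aside:
    #   datetime.datetime(2020, 1, 16, 15, 25, 0) -> 1579188300000, a 13 digit integer)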

    # Done with the validations. Start building the service endpoint then.
    service_endpoint = "/api/plugins/telemetry/" + str(
        device_cred[0]) + "/" + str(device_cred[1]) + "/values/timeseries?"

    url_elements = []

    if interval:
        url_elements.append("interval=" + str(interval))

    url_elements.append("limit=" + str(limit))

    if agg:
        url_elements.append("agg=" + str(agg))

    # The element in device_cred[2] can hold multiple timeseries keys (if the device in question is a multi-sensor one). If a timeseries filter was provided, it is now time to apply it to reduce the number of variable types returned
    if timeseries_keys_filter:
        # Grab the original timeseries key list to a single variable (keep it as a list: the membership test and the later ",".join() both expect one)
        device_ts_keys_list = device_cred[2]
        valid_keys = []
        for timeseries_key in timeseries_keys_filter:
            # And now check if any of the elements passed in the filter list is in the initial list
            if timeseries_key in device_ts_keys_list:
                # Add it to the valid keys list if so
                valid_keys.append(timeseries_key)
            # Otherwise warn the user of the mismatch
            else:
                timeseries_log.warning(
                    "The filter key '{0}' provided in the filter list is not a valid timeseries key. Ignoring it..."
                    .format(str(timeseries_key)))

        # If the last loop didn't yield any valid results, warn the user and default to the original key list
        if not len(valid_keys):
            timeseries_log.warning(
                "Unable to apply timeseries key filter: none of the provided keys had a match. Defaulting to {0}..."
                .format(str(device_ts_keys_list)))
            valid_keys = device_ts_keys_list
        else:
            # And inform the user of the alteration
            timeseries_log.info(
                "Valid filter found. Running remote API query with keys: {0}".
                format(str(valid_keys)))

        url_elements.append("keys=" + ",".join(valid_keys))

    else:
        # No filters required. Append the full timeseries elements then
        url_elements.append("keys=" + ",".join(device_cred[2]))

    url_elements.append("startTs=" + str(start_ts))
    url_elements.append("endTs=" + str(end_ts))

    # Done. Now mash up the whole thing into a '&' separated string
    service_endpoint += "&".join(url_elements)
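    # (Illustrative example of a fully assembled endpoint, with hypothetical values:
    #  /api/plugins/telemetry/DEVICE/3f0e8760-3874-11ea-8da5-2fbefd4cb87e/values/timeseries?limit=100&keys=temperature,humidity&startTs=1579189110790&endTs=1579193100786)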

    # I'm finally ready to query the remote endpoint. This service requires a REGULAR type authorization token
    service_dict = utils.build_service_calling_info(
        mac.get_auth_token(user_type='tenant_admin'), service_endpoint)

    try:
        response = requests.get(url=service_dict['url'],
                                headers=service_dict['headers'])
    except (requests.exceptions.ConnectionError,
            requests.exceptions.ConnectTimeout):
        error_msg = "Unable to establish a connection with {0}...".format(
            str(service_dict['url']))
        timeseries_log.error(error_msg)
        raise utils.ServiceEndpointException(message=error_msg)

    # Check first if the response came back with a HTTP 200
    if response.status_code != 200:
        error_msg = "Request unsuccessful: Received HTTP {0} with message: {1}".format(
            str(response.status_code), str(eval(response.text)['message']))
        timeseries_log.error(error_msg)
        raise utils.ServiceEndpointException(message=error_msg)
    else:
        # The results are going to be returned as a dictionary of dictionaries in the following format:
        # result_dict = {
        #               "timeseries_key_1": [
        #                   {'ts': int, 'value': str},
        #                   {'ts': int, 'value': str},
        #                   ...
        #                   {'ts': int, 'value': str}
        #               ],
        #               "timeseries_key_2": [
        #                   {'ts': int, 'value': str},
        #                   {'ts': int, 'value': str},
        #                   ...
        #                   {'ts': int, 'value': str}
        #               ],
        #               ...
        #               "timeseries_key_N": [
        #                   {'ts': int, 'value': str},
        #                   {'ts': int, 'value': str},
        #                   ...
        #                   {'ts': int, 'value': str}
        #               ]
        # }
        # Use this as a reference for when another method needs to consume data from this response. It's an overly complicated structure, honestly, and it wouldn't be hard to create a simple method to call after this one to simplify it greatly.
        # But there's no point in doing that until we know exactly what format needs to be returned.

        # Apply the 'eval' base method just to transform the str that is returned into a dict
        result_dict = eval(response.text)

        # Finally, check if any of the entries in the returned dictionary matches the 'limit' parameter and warn the user of potential missing results if so
        for result_key in list(result_dict.keys()):
            if len(result_dict[result_key]) == limit:
                timeseries_log.warning(
                    "Timeseries key '{0}' results were limited by the 'limit' parameter: got {1} valid results back"
                    .format(str(result_key), str(limit)))

        # Return the result dictionary finally
        return result_dict
def getTenantAssets(type=None, textSearch=None, idOffset=None, textOffset=None, limit=10):
    """This is the standard method to retrieve all ASSETs currently in the ThingsBoard installation database (regardless which database is implemented). As with all services of this type so far, this is the ThingsBoard side of the whole process,
    the one that places a request in the expected format to the ThingsBoard API.
    @:param type (str): OPTIONAL Use this argument to filter the results for a specific asset type (e.g. 'building', 'room', 'floor', etc...). This is a free text field on the ThingsBoard side, which means any string can be set in it. If you know
    a priori which exact ASSET type you're interested in, use this argument to narrow down the results to be returned
    @:param textSearch (str): OPTIONAL Use this argument to provide a str to narrow down the returned results based on the 'name' field from the ASSET description. As with similar methods, this field is quite limited: unless an exact match
    is found between the provided textSearch argument and the contents of the 'name' field, no filtering actually takes place.
    @:param idOffset (str): OPTIONAL Analogous field to the previous one but this one applies to the 'id' field. The filtering abilities of this argument are also quite limited. Check similar methods that use this argument too for more detailed
    descriptions.
    @:param textOffset (str): OPTIONAL So far, still no idea of what this does. Other than determining that it only accepts strings, I still have no clue to what is the actual purpose of this element.
    @:param limit (int): Use this field to limit the number of results returned from this service. If the argument in this field prevents the full scope of results from being returned, a specific set of structures, namely a 'nextPageLink' and
    'hasNext', are also returned. In this event, the method warns the caller that there are results left to return, but in the end it is up to the caller to specify a higher limit value to retrieve them.
    @:return response (requests.models.Response): If the API call was successful, this method returns an HTTP response object back with the following dictionary in its 'text' field:
    "data": [
    # ASSET 1 data
    {
        "id": {
        "entityType": string,
        "id": string
      },
      "createdTime": int,
      "additionalInfo": {
        "description": "A dummy building that I'm planning to fill with dead rats and cockroaches once I'm done with it"
      },
      "tenantId": {
        "entityType": string,
        "id": string
      },
      "customerId": {
        "entityType": string,
        "id": string
      },
      "name": string,
      "type": string
    },
    # ASSET 2 Data
    {
        "id": {
        "entityType": string,
        "id": string
      },
      "createdTime": int,
      "additionalInfo": {
        "description": string
      },
      "tenantId": {
        "entityType": string,
        "id": string
      },
      "customerId": {
        "entityType": string,
        "id": string
      },
      "name": string,
      "type": string
    },
    .
    .
    .
    # ASSET N Data
    {
        "id": {
        "entityType": string,
        "id": string
      },
      "createdTime": int,
      "additionalInfo": {
        "description": string
      },
      "tenantId": {
        "entityType": string,
        "id": string
      },
      "customerId": {
        "entityType": string,
        "id": string
      },
      "name": string,
      "type": string
    }
    ]
    """
    asset_control_log = ambi_logger.get_logger(__name__)

    # Validate mandatory inputs first
    utils.validate_input_type(limit, int)

    # And now for the OPTIONAL ones
    if type:
        utils.validate_input_type(type, str)
    if textSearch:
        utils.validate_input_type(textSearch, str)
    if idOffset:
        utils.validate_input_type(idOffset, str)
    if textOffset:
        utils.validate_input_type(textOffset, str)

    # Validate the limit a bit further
    if limit <= 0:
        error_msg = "Invalid limit provided: {0}. Please provide a positive, greater than zero limit value!".format(str(limit))
        asset_control_log.error(error_msg)
        raise utils.InputValidationException(message=error_msg)

    # Setup the base endpoint
    service_endpoint = "/api/tenant/assets?"

    url_strings = []

    # Check the provided inputs and add the necessary elements to the endpoint to call the service
    if type:
        url_type = "type=" + urllib.parse.quote(type.encode('UTF-8')).replace('/', '%2F')
        url_strings.append(url_type)

    if textSearch:
        url_textSearch = "textSearch=" + urllib.parse.quote(textSearch.encode('UTF-8')).replace('/', '%2F')
        url_strings.append(url_textSearch)

    if idOffset:
        url_idOffset = "idOffset=" + urllib.parse.quote(idOffset.encode('UTF-8')).replace('/', '%2F')
        url_strings.append(url_idOffset)

    if textOffset:
        url_textOffset = "textOffset=" + urllib.parse.quote(textOffset.encode('UTF-8')).replace('/', '%2F')
        url_strings.append(url_textOffset)

    url_strings.append("limit=" + str(limit))

    # Concatenate the elements in the url_strings list into a single url string
    service_endpoint += '&'.join(url_strings)

    # Get the standard dictionary to call the remote service. It appears that different installations require different sets of user credentials... still trying to figure out what the hell is going on with this one
    service_dict = utils.build_service_calling_info(mac.get_auth_token('tenant_admin'), service_endpoint=service_endpoint)

    # And try to get a response from the remote API
    try:
        response = requests.get(url=service_dict['url'], headers=service_dict['headers'])
    except (requests.exceptions.ConnectionError, requests.exceptions.ConnectTimeout) as ce:
        error_msg = "Could not get a response from {0}...".format(str(service_dict['url']))
        asset_control_log.error(error_msg)
        raise ce

    # Check the HTTP response code first
    if response.status_code != 200:
        error_msg = "Request unsuccessful: Received an HTTP " + str(eval(response.text)['status']) + " with message: " + str(eval(response.text)['message'])
        asset_control_log.error(error_msg)
        raise utils.ServiceEndpointException(message=error_msg)
    else:
        # Check if the 'hasNext' flag is set, i.e., if there are still results to return from the ThingsBoard side of things. In any case, the result structure has that flag set to either 'true' or 'false', which are not recognized as proper
        # boolean values by Python (those are boolean natives from Postgres/Cassandra). As such, I need to 'translate' the returned text to Python-esque first using the method built for that purpose
        if eval(utils.translate_postgres_to_python(response.text))['hasNext']:
            asset_control_log.warning("Only {0} results returned. There are still more results to return from the remote API side. Increase the 'limit' argument to obtain them.".format(str(limit)))

        # But return the response nonetheless
        return response
def getTenants(textSearch=None, idOffset=None, textOffset=None, limit=10):
    """GET method to retrieve either all tenants registered in the thingsboard server or a specific tenant by providing the related search terms.
    @:param OPTIONAL textSearch (str) - A text search string to limit the number of tenants to be returned by this operation. This functionality is quite limited I may add. It only searches the title field and only returns any results if
    this element is EXACTLY equal to the title field. E.g. textSearch='Mr Ricardo Almeida' returns that tenant's information but textSearch='Ricardo Almeida' returns nothing, even though this string is contained in the 'name' field
    @:param OPTIONAL idOffset (str) - A possible search pattern for just the 'id' field. (Strangely enough, using the tenant's id in the textSearch parameter yields no results). A tenant id has a fixed format [8]-[4]-[4]-[4]-[12], that is,
    an 8 character block, a '-' character, then a 4 character block, another '-' character and so on. If the idOffset provided is anything but the first four blocks, including the last '-', the remote API returns an HTTP 400 - Invalid UUID
    string. Yet adding just one character after the last '-' returns a list of all registered tenants... again, I'm still failing to see the use of this field, to be honest
    @:param OPTIONAL textOffset (any) - No idea what this field is used for. I've tried searches with matching and un-matching strings, ints, floats, etc... and I always get all the tenants back. Pointless field if I ever saw one...
    @:param limit (int) - The only required field in this method. Limits the number of results to return
    @:return tenant_data (dict) - The return element is a complex one. If successful, the returned structure is as follows:
    tenant_data = {
        'data': [
            {
                tenant_data_1
            },
            {
                tenant_data_2
            }, ...
            {
                tenant_data_n
            }
        ],
        'nextPageLink': nextPageData,
        'hasNext': bool
    }

    The above is the overall framework of how the results are returned: a dictionary with 3 keys: 'data', 'nextPageLink' and 'hasNext'.
    The 'data' key contains a list of dictionaries, with as many items as either existing tenants or limit number, whatever is lower. Each item of the data list has the following format:
    tenant_data = {
        'id': {
            'entityType': str,
            'id': str
        },
        'createdTime': int (POSIX timestamp),
        'description': str,
        'country': str,
        'state': str,
        'city': str,
        'address': str,
        'address2': str,
        'zip': str,
        'phone': str,
        'email': str,
        'title': str,
        'region': str,
        'name': str
    }
    The 'nextPageLink' key is either set to 'None', if all existing tenant data was returned in the 'data' value, or, if the limit argument has clipped the number of returned elements, it has the following dictionary format:
    'nextPageLink': {
        'limit': int,
        'textSearch': str,
        'textSearchBound': str,
        'textOffset': str,
        'idOffset': str
    }
    while the 'hasNext' (a boolean) key is set to either False if the limit argument was high enough or True if there are still results left to return from the remote ThingsBoard API
    """

    # Fetch a local logger for this method only
    tenant_log = ambi_logger.get_logger(__name__)

    # Validate inputs
    try:
        # Validate the mandatory inputs
        utils.validate_input_type(limit, int)
        # As for the optional ones, I need to check if they were passed with non-default values first
        if textSearch:
            utils.validate_input_type(textSearch, str)
        # Validate each optional argument independently (an elif chain would skip the later ones whenever an earlier one is present)
        if idOffset:
            utils.validate_input_type(idOffset, str)
        if textOffset:
            utils.validate_input_type(textOffset, str)
    except utils.InputValidationException as ive:
        tenant_log.error(ive.message)
        raise ive

    # The limit parameter needs a bit more validation. Passing limit=0 also triggers an error from the remote API. By validating this parameter at this stage, there's no need to deal with this potential error later on.
    if limit <= 0:
        error_msg = "Invalid limit provided: {0}. Please provide a positive, greater than zero limit value!".format(
            str(limit))
        tenant_log.error(error_msg)
        raise utils.InputValidationException(message=error_msg)

    service_endpoint = "/api/tenants?"

    # The service endpoint for this particular method has its arguments built in using a standard URL format. Because of this, I need to make sure that
    # whatever characters are passed in this method's arguments, they are properly escaped before being added to the
    # URL string (URL strings are notoriously picky)

    url_strings = []
    if textSearch:
        # First encode the string into a safe character set (UTF-8) and only then do the escaping of all characters to the URL charset. NOTE: For some reason the following method escapes all problematic characters to URL-esque except one of
        # the worst ones in that regard: '/'. The forward slash, a basic character in URL strings to denote paths, should be escaped to '%2F' if it appears in an argument string, but the method below ignores this, not sure really why...
        # Anyway, an easily solved problem by forcing a string replace for this character after executing the base function
        url_textSearch = "textSearch=" + urllib.parse.quote(
            textSearch.encode('UTF-8')).replace('/', '%2F')
        url_strings.append(url_textSearch)

    if idOffset:
        url_idOffset = "idOffset=" + urllib.parse.quote(
            idOffset.encode('UTF-8')).replace('/', '%2F')
        url_strings.append(url_idOffset)

    if textOffset:
        url_textOffset = "textOffset=" + urllib.parse.quote(
            textOffset.encode('UTF-8')).replace('/', '%2F')
        url_strings.append(url_textOffset)

    # Add the mandatory parameter to the list as is
    url_strings.append("limit=" + str(limit))
    service_endpoint += '&'.join(url_strings)

    service_dict = utils.build_service_calling_info(
        mac.get_auth_token(user_type='sys_admin'), service_endpoint)
    try:
        response = requests.get(url=service_dict['url'],
                                headers=service_dict['headers'])
    except (requests.exceptions.ConnectionError,
            requests.exceptions.ConnectTimeout) as ce:
        error_msg = "Could not get a response from {0}..".format(
            str(service_dict['url']))
        tenant_log.error(error_msg)
        raise ce

    # In order to continue, I'm only interested in HTTP 200. Whatever comes back different from that, I'm shutting this thing down
    if response.status_code != 200:
        # Capture the error message that is returned in the message body, as a dictionary encoded in a str (hence the eval to cast it from str back to dict)
        error_dict = eval(response.text)
        error_msg = "Received an HTTP " + str(
            error_dict['status']) + " with the message: " + str(
                error_dict['message'])
        tenant_log.error(error_msg)
        raise utils.ServiceEndpointException(
            message=error_msg, error_code=int(error_dict['errorCode']))
    else:
        # Replace the troublesome elements from the API side to Python-esque (Pass it just the text part of the response. I have no use for the rest of the object anyway)

        # At this point, I'm going to check the state of the 'hasNext' key in the response and warn the user if it's set to True (it means that the limit argument was set to a value that left some records still on the API side)
        if eval(utils.translate_postgres_to_python(response.text))['hasNext']:
            # In this case, warn the user and carry on
            tenant_log.warning(
                "There are still more results to return from the API side. Increase the 'limit' argument value to obtain them."
            )

        # I'm done. Send back the response data
        return response
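
# A minimal, self-contained sketch of the query string assembly used above.
# urllib.parse.quote leaves '/' unescaped because its 'safe' parameter defaults
# to '/'; the standard urllib.parse.urlencode (or quote with safe='') escapes
# it too, removing the need for the manual .replace('/', '%2F') workaround.
# The helper name and its parameters below are illustrative only, not part of
# this module's API.
import urllib.parse

def build_query_string(limit, textSearch=None, idOffset=None, textOffset=None):
    """Assemble the 'key=value&...' tail for a paginated GET endpoint."""
    params = {}
    if textSearch:
        params['textSearch'] = textSearch
    if idOffset:
        params['idOffset'] = idOffset
    if textOffset:
        params['textOffset'] = textOffset
    # The mandatory parameter goes in last, as in the method above
    params['limit'] = limit
    # urlencode escapes every reserved character, '/' included
    return urllib.parse.urlencode(params)

# Example: build_query_string(10, textSearch='A/B tenant')
# -> 'textSearch=A%2FB+tenant&limit=10'
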
def get_auth_token(user_type):
    """This is going to be the go-to method in this module. This method receives one of the supported user types ('SYS_ADMIN', 'TENANT_ADMIN' or 'CUSTOMER_USER') and fetches the respective authorization token. What this method does to get it is
    abstracted from the user. This method automatically checks the usual suspects first: database table. If there's any token in there for the provided user type, it then tests it to see if it is still valid. If not, it then tries to use the
    refresh token to issue a valid one and, if that is also not possible, request a new pair of authentication and refresh tokens.
    This method should be integrated into basic service calls to save the user to deal with the whole authorization token logistics
    @:param user_type (str) - One of the following supported user types: sys_admin, tenant_admin, customer_user (the case type of this argument is irrelevant because I will take care of it later on)
    @:raise utils.InputValidationException - If an invalid argument is provided
    @:raise utils.AuthenticationException - If the authentication credentials are not correct
    @:raise utils.ServiceEndpointException - If the call to the remote service fails
    @:raise mysql_utils.MySQLDatabaseException - If problems arise when dealing with the database
    @:return token (str) - A valid authorization token that can be used to authenticate a remote service call"""

    auth_token_log = ambi_logger.get_logger(__name__)

    # Validate the input as a data type and as one of the expected user types
    utils.validate_input_type(user_type, str)

    # Set the user type string to all lower case characters to simplify comparisons from this point on
    user_type = user_type.lower()
    supported_user_types = ['sys_admin', 'tenant_admin', 'customer_user']

    if user_type not in supported_user_types:
        raise utils.InputValidationException(
            "Invalid user type provided: '{0}'. Please provided one of these: {1}"
            .format(str(user_type), str(supported_user_types)))

    # All seems good so far. Let's check the database first
    database_name = user_config.mysql_db_access['database']
    table_name = proj_config.mysql_db_tables['authentication']

    cnx = mysql_utils.connect_db(database_name=database_name)
    select_cursor = cnx.cursor(buffered=True)
    change_cursor = cnx.cursor(buffered=True)

    # Grab the full column list from the database for indexing purposes
    column_list = mysql_utils.get_table_columns(database_name=database_name,
                                                table_name=table_name)

    # Let's see if there's any token already in the database
    sql_select = """SELECT token, refreshToken FROM """ + str(
        table_name) + """ WHERE user_type = %s;"""

    select_cursor = mysql_utils.run_sql_statement(select_cursor, sql_select,
                                                  (user_type, ))

    # Check if any results came back
    if select_cursor.rowcount > 0:
        # I got a valid token pair back. Extract the authorization from it
        auth_token = select_cursor.fetchone()[0]

        # And call the getUser service providing the retrieved token to see if a) the token is still valid and b) the user_type provided matches what the remote API sends back
        token_status_response = tb_auth_controller.getUser(
            auth_token=auth_token)

        # And convert the response body into the expected dictionary for easier access after this point.
        # NOTE: Interesting development in this case: it turns out that I can authorize a user using an authorization token that was issued from a different ThingsBoard installation!!! In other words, I can get an authorization token issued from my
        # local ThingsBoard installation and use it to get a "valid" authentication in the remote ThingsBoard installation. When I say "valid" I mean that the interface accepts the token without any kind of feedback regarding its actual validity. Yet,
        # when I execute a service with it, I do get a HTTP 200 response but without any data! This doesn't make any sense and complicates my code a lot, so I need to deal with these edge cases too...
        # Attempt to do the following only if something was returned back in the text parameter of the response
        token_status_dict = None

        if token_status_response.text != "":
            token_status_dict = eval(
                utils.translate_postgres_to_python(token_status_response.text))

        # This covers that particularly annoying case in which a valid authorization token from a different installation is used. The installation accepts the token, since it has the expected format, but internally it gets rejected
        # because the credential pair that originated it obviously doesn't match! Somehow the API fails to mention this: it accepts the token and even returns HTTP 200 responses to my requests, but these come back all
        # empty, presumably because the internal authentication failed... because the tokens are wrong. What an unnecessary mess... If a case such as this is detected, simply get a new pair of tokens. Most of these cases are solved by
        # forcing a token refresh
        if token_status_response.status_code != 200 or token_status_response.text == "":
            # Check the most usual case for a non-HTTP 200 return: HTTP 401 with sub-errorCode 11 (it's embedded in the response text) - the authorization token has expired
            if token_status_response.status_code == 401 and eval(
                    token_status_response.text)['errorCode'] == 11:
                # Inform the user first
                auth_token_log.warning(
                    "The authorization token for user type = {0} retrieved from {1}.{2} is expired. Requesting new one..."
                    .format(str(user_type), str(database_name),
                            str(table_name)))
            elif token_status_response.text == "":
                auth_token_log.warning(
                    "The authorization token provided was issued from a different ThingsBoard installation than this one! Need to issue a new pair..."
                )

            # Use the refresh token to retrieve a new authorization dictionary into the proper variable. No need to provide the refresh token: the tb_auth_controller.refresh_session_token method already takes care of retrieving it from the
            # database. Create a dictionary to call this method by setting all user_types to False except the one that I want
            new_auth_dict = {
                'sys_admin': False,
                'tenant_admin': False,
                'customer_user': False,
                user_type: True
            }

            # If I caught that annoying case in which a valid authorization token from a different ThingsBoard installation was used, request a brand new pair of session tokens
            if token_status_response.text == "":
                new_auth_dict = tb_auth_controller.get_session_tokens(
                    sys_admin=new_auth_dict['sys_admin'],
                    tenant_admin=new_auth_dict['tenant_admin'],
                    customer_user=new_auth_dict['customer_user'])
                auth_token_log.info(
                    "Got a new pair of authorization tokens for the {0} ThingsBoard installation."
                    .format(str(user_config.access_info['host'])))
            # Otherwise, it's an expired token case. Deal with it properly then
            # NOTE: The call to the refresh_session_token method already verifies and deals with expired refreshTokens too.
            else:
                new_auth_dict = tb_auth_controller.refresh_session_token(
                    sys_admin=new_auth_dict['sys_admin'],
                    tenant_admin=new_auth_dict['tenant_admin'],
                    customer_user=new_auth_dict['customer_user'])
                auth_token_log.info(
                    "Refreshed the authorization tokens for the {0} ThingsBoard installation."
                    .format(str(user_config.access_info['host'])))

            # From this point on, the process is the same for both cases considered above

            # If I got to this point, then my new_auth_dict has a fresh pair of authorization and refresh tokens under the user_type key entry (the previous call raises an Exception otherwise)
            # In this case, I have the tokens in the database expired. Update these entries before returning the valid authorization token
            sql_update = mysql_utils.create_update_sql_statement(
                column_list=column_list,
                table_name=table_name,
                trigger_column_list=['user_type'])

            # Prepare the data tuple for the UPDATE operation respecting the expected order: user_type, token, token_timestamp, refreshToken, refreshToken_timestamp and user_type again (because of the WHERE clause in the UPDATE)
            update_data_tuple = (user_type, new_auth_dict[user_type]['token'],
                                 datetime.datetime.now().replace(
                                     microsecond=0),
                                 new_auth_dict[user_type]['refreshToken'],
                                 datetime.datetime.now().replace(
                                     microsecond=0), user_type)

            # Execute the statement
            change_cursor = mysql_utils.run_sql_statement(
                change_cursor, sql_update, update_data_tuple)

            # And check the execution results
            if not change_cursor.rowcount:
                error_msg = "Could not update {0}.{1} with '{2}' statement...".format(
                    str(database_name), str(table_name),
                    str(change_cursor.statement))
                auth_token_log.error(error_msg)
                change_cursor.close()
                select_cursor.close()
                cnx.close()
                raise mysql_utils.MySQLDatabaseException(message=error_msg)
            else:
                auth_token_log.info(
                    "Token database information for user_type = '{0}' updated successfully in {1}.{2}!"
                    .format(str(user_type), str(database_name),
                            str(table_name)))
                cnx.commit()
                # Close the database access objects and return the valid token then
                change_cursor.close()
                select_cursor.close()
                cnx.close()
                return new_auth_dict[user_type]['token']
        # Check if the user type returned in the response (under the 'authority' key in the response dictionary) matches the user_type provided (it would be quite weird if it didn't, but check it anyway)
        elif token_status_dict is not None and token_status_dict[
                'authority'].lower() != user_type:
            auth_token_log.warning(
                "Attention: the authorization token retrieved from {0}.{1} for user type '{2}' provided is actually associated with a '{3}' user type! Resetting..."
                .format(str(database_name), str(table_name), str(user_type),
                        str(token_status_dict['authority'])))
            # Mismatch detected. Assuming that the ThingsBoard API only accepts user types from the set defined and since I've validated the user type provided as argument also, this means that my mismatch is at the MySQL database level,
            # that somehow has a valid authentication token submitted under a valid user type, just not the correct one
            # First, update the user_type in the database for the correct one (the one retrieved from the remote API)
            remote_user_type = token_status_dict['authority']
            # Request an UPDATE SQL template to replace the current user type by the correct remote_user_type
            sql_update = mysql_utils.create_update_sql_statement(
                column_list=['user_type'],
                table_name=table_name,
                trigger_column_list=['user_type'])
            data_tuple = (remote_user_type, user_type)

            # Execute the statement
            change_cursor = mysql_utils.run_sql_statement(
                change_cursor, sql_update, data_tuple)

            # Check if something was done
            if not change_cursor.rowcount:
                error_msg = "Update operation '{0}' in {1}.{2} not successful!".format(
                    str(change_cursor.statement), str(database_name),
                    str(table_name))
                auth_token_log.error(error_msg)
                change_cursor.close()
                select_cursor.close()
                cnx.close()
                raise mysql_utils.MySQLDatabaseException(message=error_msg)
            else:
                # Commit the changes, warn the user, request a new authentication token for the original user_type requested, save it in the database (in a new entry given that the last one was changed) and return the valid authorization token
                # back, which should always be what this method does before exiting (either this or raise an Exception)
                cnx.commit()

                auth_token_log.warning(
                    "Successfully updated user_type = {0} entry to {1} in {2}.{3}. Requesting new authorization token to {0}..."
                    .format(str(user_type), str(remote_user_type),
                            str(database_name), str(table_name)))

                # Set out the flags for the new session token request, setting all user_types to False at first but then switching on to True only the one matching the provided user_type
                new_auth_dict = {
                    'sys_admin': False,
                    'tenant_admin': False,
                    'customer_user': False,
                    user_type: True
                }

                # And now I can request a new session token for only the user_type that I need without having to spell out a different call signature for each possible case. Clever!
                new_auth_dict = tb_auth_controller.get_session_tokens(
                    sys_admin=new_auth_dict['sys_admin'],
                    tenant_admin=new_auth_dict['tenant_admin'],
                    customer_user=new_auth_dict['customer_user'])

                # If I got here, it means that I have a new authorization dictionary with all entries set to None except the one corresponding to the requested user_type. Update the database and return the token back to the user. Since
                # new_auth_dict is properly filled, I can now skip the rest of this if-else jungle: execution falls through to the INSERT operation below, which stores its data into the database
                pass

        # The HTTP status code is a nice 200 OK. Nothing to do but to return the valid token
        else:
            auth_token_log.info(
                "Got a still valid authorization token for user type {0} from {1}.{2}."
                .format(str(user_type), str(database_name), str(table_name)))
            # Close the database structures before returning the token
            select_cursor.close()
            change_cursor.close()
            cnx.close()
            return auth_token

    else:
        # If I get to this point, it means that no authorization token at all was found in the database for this user_type. A brand new pair of tokens needs to be requested from the remote API; the INSERT further down then stores it as a
        # new record in the authentication table.
        # Create a base for the new authorization dictionary by setting all user_types to False initially and then triggering just the one that needs new authorization tokens to True
        new_auth_dict = {
            'sys_admin': False,
            'tenant_admin': False,
            'customer_user': False,
            user_type: True
        }

        # And use this to request a new pair of authorization tokens from the remote API
        new_auth_dict = tb_auth_controller.get_session_tokens(
            sys_admin=new_auth_dict['sys_admin'],
            tenant_admin=new_auth_dict['tenant_admin'],
            customer_user=new_auth_dict['customer_user'])

    # In any case, I should have a new_auth_dict dictionary here with one entry filled in with a valid authorization token. Time to add it to the database
    sql_insert = mysql_utils.create_insert_sql_statement(
        column_list=column_list, table_name=table_name)

    # And create the data tuple by replacing the members in the column_list retrieved before by the corresponding values
    column_list[column_list.index('user_type')] = user_type
    column_list[column_list.index('token')] = new_auth_dict[user_type]['token']
    column_list[column_list.index(
        'token_timestamp')] = datetime.datetime.now().replace(microsecond=0)
    column_list[column_list.index(
        'refreshToken')] = new_auth_dict[user_type]['refreshToken']
    column_list[column_list.index(
        'refreshToken_timestamp')] = datetime.datetime.now().replace(
            microsecond=0)

    # Execute the statement
    change_cursor = mysql_utils.run_sql_statement(change_cursor, sql_insert,
                                                  tuple(column_list))

    if not change_cursor.rowcount:
        error_msg = "Failed to execute '{0}' in {1}.{2}. Exiting...".format(
            str(change_cursor.statement), str(database_name), str(table_name))
        auth_token_log.error(error_msg)
        change_cursor.close()
        select_cursor.close()
        cnx.close()
        raise mysql_utils.MySQLDatabaseException(message=error_msg)
    else:
        cnx.commit()
        auth_token_log.info(
            "Added authorization token from user_type = {0} to {1}.{2} successfully!"
            .format(str(user_type), str(database_name), str(table_name)))
        # Return the token then
        select_cursor.close()
        change_cursor.close()
        cnx.close()
        return new_auth_dict[user_type]['token']
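
# A quick note on the 'flag dictionary' trick used repeatedly above: when a
# Python dict literal contains a duplicate key, the value listed last wins.
# Setting every user type to False first and then adding the variable user_type
# key with True therefore flips exactly one flag. A minimal sketch (the helper
# name is illustrative only):
def build_user_type_flags(user_type):
    """Return a flags dict with only the given user_type set to True."""
    return {
        'sys_admin': False,
        'tenant_admin': False,
        'customer_user': False,
        user_type: True
    }

# Example: build_user_type_flags('tenant_admin')
# -> {'sys_admin': False, 'tenant_admin': True, 'customer_user': False}
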
def getCustomers(textSearch=None, idOffset=None, textOffset=None, limit=10):
    """This in one of the simplest GET methods in the customer-controller section of the ThingsBoard API. With it I only need to provide a valid limit number and I can request a list of all registered customers in the platform so far,
    so that I can then populate my own MySQL database table with them.
    All parameters initialized to None in the method signature are OPTIONAL (textSearch, idOffset and textOffset). Those that were set to specific values and data types are MANDATORY (limit)
    @:type user_types allowed for this service: TENANT_ADMIN
    @:param textSearch (str) - Use this field to narrow down results based only on the 'name' field (which in this case should be the same as 'title', though the textSearch field only looks at the former). Yet, in order to yield any results,
    the textSearch field has to be exactly equal to whatever is in the 'name' field in the remote API (fully case sensitive in this case...)
    @:param idOffset (str) - A similar field to the one before in the sense that its search scope is limited to the 'id' fields. It provides a bit more flexibility than the last one - id strings can be inserted with, at most,
    11 of their last characters omitted and meaningful results are still returned. Any id string shorter than that results in 'Invalid UUID string' errors.
    @:param textOffset (str) - I'm still at a loss as to what this parameter does... This is the third API service that I process using a python module, with loads of testing using the ThingsBoard Swagger testing application, and I'm still
    waiting for a test that can shed any light on what this... thing... really does. Leave it empty or write your favorite poem in it: it's all the same for the remote API really...
    @:param limit (int) - Use this field to limit the number of results returned, regardless of other limiters around. If the limit field did truncate the set of returned results, the result dictionary comes back with its 'nextPageLink' key set
    to another dictionary describing just that and the 'hasNext' key set to True. Otherwise, if all records were returned, 'nextPageLink' is set to NULL and 'hasNext' is returned set to False.
    @:raise utils.InputValidationException - For errors during the validation of inputs
    @:raise utils.ServiceEndpointException - For errors occurring during the interface with the remote API
    @:raise Exception - For any other types of errors
    @:return an HTTP response object containing the following result dictionary:
    {
        "data": [
            {
                customer_data_1
            },
            {
                customer_data_2
            },
            ...
            {
                customer_data_n
            }
        ],
        "nextPageLink": null or dict,
        "hasNext": bool
    }
    Each "customer_data" sub dictionary has the following format:
    customer_data = {
        "id": {
            "entityType": str,
            "id": str
        },
        "createTime": int,
        "additionalInfo": null or {
            "description": str
        },
        "country": str,
        "state": str,
        "city": str,
        "address": str,
        "address2": str,
        "zip": str,
        "phone": str,
        "email": str,
        "title": str,
        "tenantId": {
            "entityType": str,
            "id": str
        },
        "name": str
    }
    """
    customer_log = ambi_logger.get_logger(__name__)

    # Validate inputs
    try:
        utils.validate_input_type(limit, int)
        if textSearch:
            utils.validate_input_type(textSearch, str)
        if idOffset:
            utils.validate_input_type(idOffset, str)
        if textOffset:
            utils.validate_input_type(textOffset, str)
    except utils.InputValidationException as ive:
        customer_log.error(ive.message)
        raise ive

    if limit <= 0:
        error_msg = "Invalid limit provided: {0}. Please provide a value greater than zero for the limit value!".format(
            str(limit))
        customer_log.error(error_msg)
        raise utils.InputValidationException(message=error_msg)

    service_endpoint = "/api/customers?"

    url_strings = []

    if textSearch:
        textSearch = urllib.parse.quote(textSearch.encode('UTF-8')).replace(
            '/', '%2F')
        url_strings.append("textSearch=" + str(textSearch))
    if idOffset:
        idOffset = urllib.parse.quote(idOffset.encode('UTF-8')).replace(
            '/', '%2F')
        url_strings.append("idOffset=" + str(idOffset))
    if textOffset:
        textOffset = urllib.parse.quote(textOffset.encode('UTF-8')).replace(
            '/', '%2F')
        url_strings.append("textOffset=" + str(textOffset))
    url_strings.append("limit=" + str(limit))

    # Create the endpoint request string
    service_endpoint += '&'.join(url_strings)

    # Place the HTTP GET request using a REGULAR type authorization token
    service_dict = utils.build_service_calling_info(
        mac.get_auth_token(user_type='tenant_admin'), service_endpoint)

    # Query the remote API
    try:
        response = requests.get(url=service_dict['url'],
                                headers=service_dict['headers'])
    except (requests.exceptions.ConnectionError,
            requests.exceptions.ConnectTimeout) as ce:
        error_msg = "Could not get a request from {0}...".format(
            str(service_dict['url']))
        customer_log.error(error_msg)
        raise ce

    # Check the status code of the HTTP response before moving forward
    if response.status_code != 200:
        error_msg = "Request unsuccessful: Received an HTTP {0} with message {1}.".format(
            str(eval(response.text)['status']),
            str(eval(response.text)['message']))
        customer_log.error(error_msg)
        raise utils.ServiceEndpointException(message=error_msg)
    else:
        # Check the status of the 'hasNext' parameter returned
        if eval(utils.translate_postgres_to_python(response.text))['hasNext']:
            customer_log.warning(
                "Only {0} results returned. There are still more results to return from the remote API side. Increase the 'limit' argument to obtain them."
                .format(str(limit)))

        return response
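
# The eval(utils.translate_postgres_to_python(response.text)) pattern used
# above converts the JSON literals 'null', 'true' and 'false' into Python
# equivalents before eval'ing the body. A sketch of an eval-free alternative
# using the standard library, assuming the ThingsBoard responses are plain
# JSON (json.loads maps null -> None and true/false -> True/False natively):
import json

def parse_api_response(response_text):
    """Parse a ThingsBoard JSON response body into a Python dictionary."""
    return json.loads(response_text)

# Example: parse_api_response('{"hasNext": false, "nextPageLink": null}')
# -> {'hasNext': False, 'nextPageLink': None}
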
def refresh_session_token(sys_admin=False,
                          tenant_admin=False,
                          customer_user=False):
    """This method is analogous to the get_session_token one but using the refreshToken, that is assumed to be in the respective database already, to get a valid authorization token without needing to provide the access credentials again,
    thus a more secure way to keep sessions active. This requires at least one of the input argument flags to be set to function. All three currently supported user_types can be refreshed by one call to this method, as long as the flags are set.
    The idea here being that, when requested, the remote API returns a pair of authentication token/refresh token in which the authentication token as a shorter validity period than the refresh token. But once the authentication token get expired,
    if a call to this method is placed before the refresh token also expires (which also happened, though later than the first one) allows to reset the whole thing, since using the refresh token routine results in a new, fresh pair with both
    expiration periods reset
    @:param sys_admin (bool) - Flag to set a refresh on the tokens for the SYS_ADMIN
    @:param tenant_admin (bool) - Flag to set a refresh on the tokens for the TENANT_ADMIN
    @:param customer_user (bool) - Flag to set a refresh on the tokens for the CUSTOMER_USER
    @:raise utils.AuthenticationException - If the access credentials cannot be used to retrieve authentication data
    @:raise utils.ServiceCallException - If errors happen when accessing the remote service
    @:raise utils.InputValidationException - If an invalid argument is provided (data type wise)
    @:raise mysql_utils.MySQLDatabaseException - If errors happen with the database access or with the integrity of the data in the database
    @:return auth_dict (dict) - An authentication dictionary in the same format used so far:
        auth_dict = {
            'sys_admin': {
                'token': str,
                'refreshToken': str
            },
            'tenant_admin': {
                'token': str,
                'refreshToken': str
            },
            'customer_user': {
                'token': str,
                'refreshToken': str
            }
        }
    As before, omitted user types have their sub dictionary set to None. This method only returns this structure back. It's up to the calling method to update the database table, to keep things as decoupled as possible at this point
    """
    refresh_token_log = ambi_logger.get_logger(__name__)

    # Check first if at least one of the argument flags was set, after validation that is
    if sys_admin:
        utils.validate_input_type(sys_admin, bool)

    if tenant_admin:
        utils.validate_input_type(tenant_admin, bool)

    if customer_user:
        utils.validate_input_type(customer_user, bool)

    # If all argument flags are False
    if not (sys_admin or tenant_admin or customer_user):
        error_msg = "No user types set. Please set one of the argument flags when calling this method..."
        refresh_token_log.error(error_msg)
        raise utils.InputValidationException(message=error_msg)

    # Retrieve the existing tokens from the database into the typical authentication dictionary
    auth_dict = {
        'sys_admin': None,
        'tenant_admin': None,
        'customer_user': None
    }

    # Add the relevant user type strings to a list for iterating
    user_type_list = []

    if sys_admin:
        user_type_list.append('sys_admin')

    if tenant_admin:
        user_type_list.append('tenant_admin')

    if customer_user:
        user_type_list.append('customer_user')

    # And grab the results from the database
    database_name = user_config.mysql_db_access['database']
    table_name = proj_config.mysql_db_tables['authentication']

    cnx = mysql_utils.connect_db(database_name=database_name)
    select_cursor = cnx.cursor(buffered=True)
    select_column = 'user_type'

    sql_select = """SELECT * FROM """ + str(table_name) + """ WHERE """

    data_tuple = tuple(user_type_list)
    where_list = []

    for _ in user_type_list:
        where_list.append(select_column + " = %s")

    sql_select += """ OR """.join(where_list) + """;"""

    # Execute the statement then
    select_cursor = mysql_utils.run_sql_statement(select_cursor, sql_select,
                                                  data_tuple)

    # Check if any results were returned
    if not select_cursor.rowcount:
        error_msg = "The statement '{0}' didn't return any results from {1}.{2}.".format(
            str(select_cursor.statement), str(database_name), str(table_name))
        refresh_token_log.error(error_msg)
        select_cursor.close()
        cnx.close()
        raise mysql_utils.MySQLDatabaseException(message=error_msg)
    # Move on if you got any results back
    else:
        # Get the list of column names from the authentication table to use as a reference to obtain the data I'm looking for
        column_list = mysql_utils.get_table_columns(
            database_name=database_name, table_name=table_name)

        # Create the base endpoint. To get a new pair of authorization tokens from an expired authorization token and a still valid refresh token, provide both tokens (with the refreshToken sent as data payload) to the '/token' endpoint
        service_endpoint = '/api/auth/token'

        # Now I can pick up a record at a time, refresh the authentication token and update the return dictionary with the reply
        result = select_cursor.fetchone()

        # Do the following as long as there is a non-None element returned from the database cursor
        while result:
            # Start by getting the standard service call structures
            con_dict = utils.build_service_calling_info(
                auth_token=result[column_list.index('token')],
                service_endpoint=service_endpoint)

            # Build the additional data payload structure which is needed for this service call in particular
            data = '{"refreshToken": "' + str(
                result[column_list.index('refreshToken')]) + '"}'

            # And call the remote API with the refresh request
            try:
                api_response = requests.post(url=con_dict['url'],
                                             headers=con_dict['headers'],
                                             data=data)
            except (requests.exceptions.ConnectionError,
                    requests.exceptions.ConnectTimeout):
                error_msg = "Unable to establish a connection with {0}:{1}. Exiting...".format(
                    str(user_config.thingsboard_host),
                    str(user_config.thingsboard_port))
                refresh_token_log.error(error_msg)
                select_cursor.close()
                cnx.close()
                raise utils.ServiceEndpointException(message=error_msg)

            # If a non-HTTP 200 status code was returned, it's probably a credential issue. Raise an exception with the proper information in it
            if api_response.status_code != 200:
                # Check first if the status code is HTTP 401 and if the sub-errorCode is 11, which means that the refresh token for this user_type is also expired. In this case I can always request a new pair instead of raising an Exception
                if api_response.status_code == 401 and eval(
                        api_response.text)['errorCode'] == 11:
                    user_type = result[column_list.index('user_type')]
                    refresh_token_log.warning(
                        "The refresh token for user_type '{0}' is also expired. Requesting a new pair..."
                        .format(str(user_type)))

                    # Request a new pair of authorization tokens, but only for the user_type that has its tokens expired, so that the other user_types' tokens currently in the database don't get invalidated by forcing the issue of a new pair. The
                    # get_session_tokens method either returns a valid pair of authorization tokens or raises an Exception with the reason why it couldn't, so there's no need to verify the next call's results
                    auth_dict = get_session_tokens(
                        sys_admin=(user_type == 'sys_admin'),
                        tenant_admin=(user_type == 'tenant_admin'),
                        customer_user=(user_type == 'customer_user'))

                    return auth_dict

                # If the error was something other than HTTP 401 with a sub-errorCode of 11, raise an Exception with the error details
                else:
                    refresh_token_log.error(api_response.text)
                    select_cursor.close()
                    cnx.close()
                    raise utils.AuthenticationException(
                        message=api_response.text,
                        error_code=api_response.status_code)

            # Got a pair of valid tokens back. Update the structures then
            else:
                auth_dict[result[column_list.index('user_type')]] = eval(
                    api_response.text)

            # And grab the next result for another iteration of this
            result = select_cursor.fetchone()

        # The while loop is done here and I've processed all results thus far. All that's left to do is return the updated authorization dictionary
        return auth_dict
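
# The refresh request above builds its JSON payload through string
# concatenation. A sketch of the same payload built with json.dumps, which
# handles the quoting and escaping automatically (relevant if a token ever
# contained a character that would break the hand-built string). The helper
# name is illustrative only:
import json

def build_refresh_payload(refresh_token):
    """Return the JSON body expected by the /api/auth/token endpoint."""
    return json.dumps({"refreshToken": refresh_token})

# Example: build_refresh_payload("abc123")
# -> '{"refreshToken": "abc123"}'
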
def get_asset_env_data(start_date,
                       end_date,
                       variable_list,
                       asset_name=None,
                       asset_id=None,
                       filter_nones=True):
    """
    Use this method to retrieve the environmental data between the two dates specified by the start_date/end_date pair, for each of the variables indicated in the variable list and for an asset identified by at least one of the elements in
    the pair asset_name/asset_id.
    :param start_date: (datetime.datetime) A datetime.datetime object denoting the beginning (the oldest date element) of the time window for data retrieval
    :param end_date: (datetime.datetime) A datetime.datetime object denoting the end (the newest date element) of the time window for data retrieval.
    :param variable_list: (list of str) A list with the variable names (ontology names) that are to be retrieved from the respective database table. Each one of the elements in the list provided is going to be validated against the 'official'
    list in proj_config.ontology_names.
    :param asset_name: (str) The name of the asset entity from where the data is to be retrieved from. This method expects either this parameter or the respective id to be provided and does not execute unless at least one of them is present.
    :param asset_id: (str) The id string associated to an asset element in the database, i.e., the 32 byte hexadecimal string in the usual 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx' format. This method expects either this element or the asset name to
    be provided before continuing. If none are present, the respective Exception is raised.
    :param filter_nones: (bool) Set this flag to True to exclude any None values from the final result dictionary. Otherwise the method returns all values, including NULL/None ones.
    :raise utils.InputValidationException: If any of the inputs fails the initial data type validation or if none of the asset identifiers (name or id) are provided.
    :raise mysql_utils.MySQLDatabaseException: If any errors occur during the database accesses.
    :return response (dict): This method returns a response dictionary in a format that is expected to be serialized and returned as a REST API response further on. For this method, the response dictionary has the following format:
        response =
            {
                env_variable_1: [
                    {
                        timestamp_1: <str>,
                        value_1: <str>
                    },
                    {
                        timestamp_2: <str>,
                        value_2: <str>
                    },
                    ...,
                    {
                        timestamp_N: <str>,
                        value_N: <str>
                    }
                ],
                env_variable_2: [
                    {
                        timestamp_1: <str>,
                        value_1: <str>
                    },
                    {
                        timestamp_2: <str>,
                        value_2: <str>
                    },
                    ...,
                    {
                        timestamp_N: <str>,
                        value_N: <str>
                    }
                ],
                ...
                env_variable_N: [
                    {
                        timestamp_1: <str>,
                        value_1: <str>
                    },
                    {
                        timestamp_2: <str>,
                        value_2: <str>
                    },
                    ...,
                    {
                        timestamp_N: <str>,
                        value_N: <str>
                    }
                ]
            }
    """
    log = ambi_logger.get_logger(__name__)

    # Validate inputs

    # Check if at least one element from the pair asset_name/asset_id was provided
    if not asset_name and not asset_id:
        error_msg = "Missing both asset name and asset id from the input parameters. Cannot continue until at least one of these is provided."
        log.error(error_msg)
        raise utils.InputValidationException(message=error_msg)

    if asset_name:
        utils.validate_input_type(asset_name, str)

    if asset_id:
        utils.validate_id(entity_id=asset_id)

    utils.validate_input_type(start_date, datetime.datetime)
    utils.validate_input_type(end_date, datetime.datetime)

    if start_date > datetime.datetime.now():
        error_msg = "The start date provided: {0} is invalid! Please provide a past date to continue...".format(
            str(start_date))
        log.error(error_msg)
        raise utils.InputValidationException(message=error_msg)

    if end_date > datetime.datetime.now():
        error_msg = "The end date provided: {0} is invalid! Please provide a past or current date to continue....".format(
            str(end_date))
        log.error(error_msg)
        raise utils.InputValidationException(message=error_msg)

    if start_date >= end_date:
        error_msg = "Invalid time window for data retrieval provided: {0} -> {1}. Cannot continue until a start_date < end_date is provided!".format(
            str(start_date), str(end_date))
        log.error(msg=error_msg)
        raise utils.InputValidationException(message=error_msg)

    utils.validate_input_type(variable_list, list)
    # Validate and normalize each element into a new list: removing items from a list while iterating over its indices skips elements and can raise an IndexError
    validated_variable_list = []
    for variable in variable_list:
        # Check if the element is indeed a str as expected
        utils.validate_input_type(variable, str)
        # Take the chance to normalize it to all lowercase characters
        variable = variable.lower()

        # And check if it is indeed a valid element
        if variable not in proj_config.ontology_names:
            log.warning(
                "Attention: the environmental variable name provided: {0} is not among the ones supported:\n{1}\nRemoving it from the variable list..."
                .format(str(variable), str(proj_config.ontology_names)))
        else:
            validated_variable_list.append(variable)

    variable_list = validated_variable_list

    # Check if the last operation didn't empty the whole environmental variable list
    if len(variable_list) == 0:
        error_msg = "The variable list is empty! Cannot continue until at least one valid environmental variable is provided"
        log.error(error_msg)
        raise utils.InputValidationException(message=error_msg)

    # The asset_id is one of the most important parameters in this case. Use the provided arguments to either obtain it or make sure the one provided is a valid one. If both parameters were provided (asset_id and asset_name), the asset_id takes precedence
    if asset_id:
        # If an asset id was provided, use it to obtain the associated name
        asset_name_db = retrieve_asset_name(asset_id=asset_id)

        # Check if any names were obtained above and, if so, check if it matches any asset name also provided
        if asset_name:
            if asset_name != asset_name_db:
                log.warning(
                    "The asset name obtained from {0}.{1}: {2} does not matches the one provided: {3}. Defaulting to {2}..."
                    .format(
                        str(user_config.access_info['mysql_database']
                            ['database']),
                        str(proj_config.mysql_db_tables['tenant_assets']),
                        str(asset_name_db), str(asset_name)))
                asset_name = asset_name_db

    if not asset_id and asset_name:
        # Another case: only the asset name was provided but no associated id. Use the respective method to retrieve the asset id from the name
        asset_id = retrieve_asset_id(asset_name=asset_name)

        # Check if a valid id was indeed returned (not None)
        if not asset_id:
            error_msg = "Invalid asset id returned from {0}.{1} using asset_name = {2}. Cannot continue...".format(
                str(user_config.access_info['mysql_database']['database']),
                str(proj_config.mysql_db_tables['tenant_assets']),
                str(asset_name))
            log.error(msg=error_msg)
            raise utils.InputValidationException(message=error_msg)

    utils.validate_input_type(filter_nones, bool)

    # Initial input validation cleared. Before moving any further, implement the database access objects and use them to retrieve a unique, single name/id pair for the asset in question
    database_name = user_config.access_info['mysql_database']['database']
    asset_device_table_name = proj_config.mysql_db_tables['asset_devices']
    device_data_table_name = proj_config.mysql_db_tables['device_data']

    cnx = mysql_utils.connect_db(database_name=database_name)
    select_cursor = cnx.cursor(buffered=True)

    # All ready for data retrieval. First, retrieve the device_id for every device associated to the given asset
    sql_select = """SELECT toId, toName FROM """ + str(
        asset_device_table_name
    ) + """ WHERE fromEntityType = %s AND fromId = %s AND toEntityType = %s;"""

    select_cursor = mysql_utils.run_sql_statement(cursor=select_cursor,
                                                  sql_statement=sql_select,
                                                  data_tuple=('ASSET',
                                                              asset_id,
                                                              'DEVICE'))

    # Analyse the execution results
    if select_cursor.rowcount == 0:
        error_msg = "Asset (asset_name = {0}, asset_id = {1}) has no devices associated to it! Cannot continue...".format(
            str(asset_name), str(asset_id))
        log.error(msg=error_msg)
        select_cursor.close()
        cnx.close()
        raise mysql_utils.MySQLDatabaseException(message=error_msg)
    else:
        log.info(
            "Asset (asset_name = {0}, asset_id = {1}) has {2} devices associated."
            .format(str(asset_name), str(asset_id),
                    str(select_cursor.rowcount)))

    # Extract the device ids to a list for easier iteration later on
    record = select_cursor.fetchone()
    device_id_list = []

    while record:
        device_id_list.append(record[0])

        # Grab another one
        record = select_cursor.fetchone()

    # Prepare one 'deviceId = %s' entry per device id retrieved so far, to be joined with OR statements and used as the device filter in the SQL SELECT statement executed later on
    device_id_string = []

    for _ in device_id_list:
        device_id_string.append("deviceId = %s")

    # And now connect them all into a single string stitched together with 'OR's
    device_where_str = """ OR """.join(device_id_string)

    # Store the full results in this dictionary
    result_dict = {}

    # Prepare and run one SQL SELECT per environmental variable, collecting the partial results into result_dict
    for i in range(0, len(variable_list)):
        sql_select = """SELECT timestamp, value FROM """ + str(
            device_data_table_name) + """ WHERE ontologyId = %s AND (""" + str(
                device_where_str
            ) + """) AND (timestamp >= %s AND timestamp <= %s);"""

        # Prepare the data tuple by joining together the current ontologyId with all the deviceIds retrieved from before
        data_tuple = tuple([variable_list[i]] + device_id_list + [start_date] +
                           [end_date])

        select_cursor = mysql_utils.run_sql_statement(cursor=select_cursor,
                                                      sql_statement=sql_select,
                                                      data_tuple=data_tuple)

        # Analyse the execution outcome
        if select_cursor.rowcount > 0:
            # Results came back for this particular ontologyId. For this method, the information of the device that made the measurement is irrelevant. Create a dictionary entry for the ontologyId parameter and populate the list of dictionaries
            # with the data retrieved
            result_dict[variable_list[i]] = []

            # Process the database records
            record = select_cursor.fetchone()

            while record:
                # Check if the filtering flag is set
                if filter_nones:
                    # And if so, check if the current record has a None as its value
                    if record[1] is None:
                        # If so, grab the next record and skip the rest of this cycle
                        record = select_cursor.fetchone()
                        continue

                result_dict[variable_list[i]].append({
                    "timestamp":
                    str(int(record[0].timestamp())),
                    "value":
                    str(record[1])
                })

                # Grab the next record in line
                record = select_cursor.fetchone()

    # All done it seems. Close down the database access elements and return the results so far
    select_cursor.close()
    cnx.close()
    return result_dict
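
# A minimal sketch of the dynamic placeholder technique used above: one '%s'
# per device id, stitched into the WHERE clause, with the actual values passed
# separately in the data tuple so the MySQL connector performs the escaping.
# The helper below is illustrative only (an IN clause is an equivalent, more
# compact form of the OR chain built in get_asset_env_data):
def build_device_filter(device_id_list):
    """Return an SQL fragment plus the matching data tuple."""
    placeholders = ", ".join(["%s"] * len(device_id_list))
    sql_fragment = "deviceId IN ({0})".format(placeholders)
    return sql_fragment, tuple(device_id_list)

# Example: build_device_filter(['id-1', 'id-2'])
# -> ('deviceId IN (%s, %s)', ('id-1', 'id-2'))
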
def getTenantDevices(type=None, textSearch=None, sortProperty=None, sortOrder=None, pageSize=10, page=0):
    """GET method to retrieve the list of devices with their associations, namely Tenants and Customers. The indexer of the returned list is the DEVICE (or its id to be more precise).
    @:type user_types allowed for this service: TENANT_ADMIN
    @:param type (str) - Use this field to narrow down the type of device to return. The type referred to in this field is the custom device type defined by the user upon its creation (e.g., 'Thermometer', 'Water meter' and so on) and this field is
    fully case sensitive. If a device type is defined as 'Thermometer', providing type = 'thermometer' doesn't return any results just because of the case difference. So, in order to be used, the caller must know precisely which types of devices were
    defined in the system so far.
    @:param textSearch (str) - Use this field to narrow down results based only in the 'name' field. Like the previous parameter, the string inserted in this field has to be exactly identical to what is in a record's 'name' field to return any
    results. For example, if a device is named 'Water Meter A2', just using 'water Meter A2' instead of the exact string (upper/lower case respected) is enough to get an empty set as response
    @:param sortProperty (str) - Optional device property to sort the results by (e.g. 'name' or 'createdTime'). If omitted, the remote API uses its default ordering.
    @:param sortOrder (str) - Optional sort direction for the results: 'ASC' or 'DESC'.
    @:param pageSize (int) - Use this field to limit the number of results returned per page, regardless of other limiters around (the other fields of the method). If the page size did truncate the results returned, the result dictionary
    comes back with the 'nextPageLink' key set to another dictionary describing just that and the 'hasNext' key set to True. Otherwise, if all records were returned, 'nextPageLink' is set to NULL and 'hasNext' comes back set to False.
    @:param page (int) - The 0-based index of the result page to retrieve.
    @:raise utils.InputValidationException - For errors during the validation of inputs
    @:raise utils.ServiceEndpointException - For errors during the API operation
    @:raise Exception - For any other types of errors
    @:return A HTTP response object containing the following result dictionary (if the API call was successful):
    {
        "data": [
            {
                device_1_data
            },
            {
                device_2_data
            },
            ...
            {
                device_n_data
            }
        ],
        "nextPageLink": null,
        "hasNext": false
    }
    Each element of the 'data' key associated list is the description of a single device in the database using the following format:
    {
      "id": {
        "entityType": str,
        "id": str
      },
      "createdTime": int,
      "additionalInfo": str,
      "tenantId": {
        "entityType": str,
        "id": str
      },
      "customerId": {
        "entityType": str,
        "id": str
      },
      "name": str,
      "type": str,
      "label": str
    }
    The way that ThingsBoard manages these devices internally guarantees that a single device can only be associated to a single tenant and a single customer, which simplifies quite a lot the logic needed to process this data later on
     """
    tenant_device_log = ambi_logger.get_logger(__name__)

    # Validate inputs
    # Start by the mandatory ones (only the limit)
    # utils.validate_input_type(limit, int)
    utils.validate_input_type(pageSize, int)
    utils.validate_input_type(page, int)

    # And then go for the optional ones
    if type:
        utils.validate_input_type(type, str)
    if textSearch:
        utils.validate_input_type(textSearch, str)
    # if idOffset:
    #    utils.validate_input_type(idOffset, str)
    # if textOffset:
    #    utils.validate_input_type(textOffset, str)
    if sortProperty:
        utils.validate_input_type(sortProperty, str)
    if sortOrder:
        utils.validate_input_type(sortOrder, str)

    # Check the number passed in limit: zero or negative values are not allowed by the API
    # if limit <= 0:
    if pageSize <= 0 or page < 0:
        error_msg = "Invalid page or page size provided: page = {0}, pageSize = {1}. Please provide a page index of zero or greater and a positive, greater than zero page size!".format(str(page), str(pageSize))
        tenant_device_log.error(error_msg)
        raise utils.InputValidationException(message=error_msg)

    # Start with the base endpoint
    service_endpoint = "/api/tenant/devices?"

    url_strings = []

    # Lets start building the API request strings to add to the endpoint
    if type:
        # Escape the string to URL-esque before adding it to the main service endpoint URL string as well as the forward slashes to '%2F' given that the quote method doesn't do that
        url_type = "type=" + urllib.parse.quote(type.encode('UTF-8')).replace('/', '%2F')
        url_strings.append(url_type)

    if textSearch:
        url_textSearch = "textSearch=" + urllib.parse.quote(textSearch.encode('UTF-8')).replace('/', '%2F')
        url_strings.append(url_textSearch)

    """
    if idOffset:
        url_idOffset = "idOffset=" + urllib.parse.quote(idOffset.encode('UTF-8')).replace('/', '%2F')
        url_strings.append(url_idOffset)

    if textOffset:
        url_textOffset = "textOffset=" + urllib.parse.quote(textOffset.encode('UTF-8')).replace('/', '%2F')
        url_strings.append(url_textOffset)
    """
    if sortProperty:
        url_sortProperty = "sortProperty=" + urllib.parse.quote(sortProperty.encode('UTF-8')).replace('/', '%2F')
        url_strings.append(url_sortProperty)

    if sortOrder:
        url_sortOrder = "sortOrder=" + urllib.parse.quote(sortOrder.encode('UTF-8')).replace('/', '%2F')
        url_strings.append(url_sortOrder)

    # url_strings.append("limit=" + str(limit))

    url_strings.append("pageSize=" + str(pageSize))
    url_strings.append("page=" + str(page))

    # Concatenate all the url_strings elements into a single string, each individual element separated by '&' as expected by the remote API, and append it to the base service endpoint
    service_endpoint += '&'.join(url_strings)

    # Get the standard service dictionary from the utils method
    service_dict = utils.build_service_calling_info(mac.get_auth_token('tenant_admin'), service_endpoint)

    # Try to get a response from the remote API
    try:
        response = requests.get(url=service_dict['url'], headers=service_dict['headers'])
    except (requests.exceptions.ConnectionError, requests.exceptions.ConnectTimeout) as ce:
        error_msg = "Could not get a response from {0}...".format(str(service_dict['url']))
        tenant_device_log.error(error_msg)
        raise ce

    # If I got a response, check first if it was the expected HTTP 200 OK
    if response.status_code != 200:
        error_msg = "Request unsuccessful: Received an HTTP " + str(eval(response.text)['status']) + " with message: " + str(eval(response.text)['message'])
        tenant_device_log.error(error_msg)
        raise utils.ServiceEndpointException(message=error_msg)
    else:
        # Before sending the result back, check the status of the 'hasNext' key in the result dictionary first and, if it is True, inform the user that there are still results left to return on the remote API side
        if eval(utils.translate_postgres_to_python(response.text))['hasNext']:
            tenant_device_log.warning("Only {0} results returned. There are still more results to return from the remote API side. Increase the 'pageSize' argument or request the next page to obtain them.".format(str(pageSize)))

        return response
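# A minimal usage sketch for the method above, assuming a reachable ThingsBoard instance and the module-level imports: page
# through all tenant devices by incrementing 'page' until the remote API reports that no further results are available. The
# helper name is hypothetical; json.loads is used here instead of eval for safer parsing of the response body.
def _example_list_all_tenant_devices(page_size=50):
    import json

    devices = []
    page = 0
    while True:
        response = getTenantDevices(pageSize=page_size, page=page)
        payload = json.loads(response.text)
        # Accumulate this page's device dictionaries
        devices.extend(payload['data'])
        # 'hasNext' is false once the last page has been returned
        if not payload['hasNext']:
            break
        page += 1
    return devices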
def getCustomerDevices(customer_name, type=None, textSearch=None, idOffset=None, textOffset=None, limit=50):
    """Method that executes a GET request to the device-controller.getCustomerDevice service to the remote API in order to obtain a list of devices associated with the customer identified by 'customer_name'. For now, this method then sends that
    information to be used to update the ambiosensing_thingsboard.thingsboard_devices_tables. This method is but a subset of the getTenantDevices method from this own module in the sense that, by specifying a user during the method call,
    the list of devices returned is limited to just the devices assigned to this customer while the getTenantDevices returns the list of all devices, as long as they are assigned to a tenant, regardless of whom that tenant may be.
    @:type user_types allowed for this service: TENANT_ADMIN, CUSTOMER_USER
    @:param customer_name (str) - The name of the customer as it was defined in its registration in the ThingsBoard interface. This parameter is going to be used to perform SELECT operations in the MySQL database using 'LIKE' clauses so,
    unlike some of the fields in the API service requests, there's some flexibility here for using names that are not exactly identical to what is in the database. The search is going to be applied to the 'name' column of the
    thingsboard_customers_table. Customer records retrieved via this interface are then used to build the service call to the remote API
    @:param type (str) - Use this field to narrow down results based on the type of device to return. The type field is set during the device registration in the ThingsBoard platform and can then be used later to associate various devices to the
    same type (e.g., 'Thermometer', 'luximeter', etc.). The search operation is case-sensitive and exact, i.e., only complete type matches are returned.
    @:param textSearch (str) - Use this field to narrow down the number of returned results based on the 'name' field. Like the previous field, this one is also case-sensitive (only identical matches return results)
    @:param idOffset (str) - Another search field, this time based on the 'id' parameter. It does provide a little more flexibility than the previous search fields, in the sense that it accepts and processes incomplete id strings,
    as long as only some (but not all) of the characters of its final 12-character block are omitted.
    @:param textOffset (str) - Still no clue on what this might be used for...
    @:param limit (int) - Use this field to truncate the number of returned results. If the result set returned from the remote API was truncated for whatever reason, the result dictionary is returned with another dictionary under the
    'nextPageLink' key detailing the results still to be returned and the 'hasNext' key set to True. Otherwise 'nextPageLink' is set to NULL and 'hasNext' to False
    @:raise utils.InputValidationException - For errors during the validation of inputs
    @:raise utils.ServiceEndpointException - For errors during the remote API access
    @:raise Exception - For any other errors
    @:return An HTTP response object containing the following result dictionary:
    {
        "data": [
            {
                customer_device_1_data
            },
            {
                customer_device_2_data
            },
            ...
            {
                customer_device_n_data
            }
        ],
        "nextPageLink": null,
        "hasNext": false
    }

    Each customer_device_data element is a dictionary in the following format:
    customer_device_n_data = {
        "id": {
            "entityType": str,
            "id": str
        },
        "createdTime": int,
        "additionalInfo": null or {
            "description": str
        },
        "tenantId": {
            "entityType": str,
            "id": str
        },
        "customerId": {
            "entityType": str,
            "id": str
        },
        "name": str,
        "type": str,
        "label": str
    }
    """

    customer_device_log = ambi_logger.get_logger(__name__)
    module_table_key = 'customers'
    columns_to_retrieve = ['id']

    # Validate inputs
    try:
        # Start by the mandatory ones first
        utils.validate_input_type(customer_name, str)
        utils.validate_input_type(limit, int)
        if type:
            utils.validate_input_type(type, str)
        if textSearch:
            utils.validate_input_type(textSearch, str)
        if idOffset:
            utils.validate_input_type(idOffset, str)
        if textOffset:
            utils.validate_input_type(textOffset, str)
    except utils.InputValidationException as ive:
        customer_device_log.error(ive.message)
        raise ive

    # Check the number passed in the limit argument for consistency
    if limit <= 0:
        error_msg = "Invalid limit provided: {0}. Please provide a greater than zero limit value!".format(str(limit))
        customer_device_log.error(error_msg)
        raise utils.InputValidationException(message=error_msg)

    # Before going any further, there is a limiting factor here: the customer id. I need to use the customer_name parameter to fetch it through a database consultation. The customer search is going to be an exhaustive one: first, search for the
    # customer_name that was passed as is. If a single result is returned - the desired outcome - move on. If not, try again with a wildcard character at the end of customer_name (customer_name%), then with one just at the beginning
    # (%customer_name) and, if a single result still can't be found, try one last time with wildcards on both ends of the string (%customer_name%) in order to get a unique record (multiple returned records are also disregarded). If no clear
    # answer is obtained by then, raise an Exception with this information
    # Connect to the MySQL database
    # Connect to the MySQL database
    cnx = mysql_utils.connect_db(user_config.mysql_db_access['database'])

    # And get a buffered cursor to run SQL statements
    select_cursor = cnx.cursor(buffered=True)

    # Build the SQL SELECT statement to execute in the MySQL database context
    sql_select = """SELECT """ + ", ".join(columns_to_retrieve) + """ FROM """ + str(proj_config.mysql_db_tables[module_table_key]) + """ WHERE name LIKE %s;"""

    # Run the statement and check what comes back
    select_cursor = mysql_utils.run_sql_statement(select_cursor, sql_select, (str(customer_name),))

    # If I got a single result from the last SQL execution, I don't need to retrieve the record itself to check it: the cursor retains the number of records found in the statement that was just executed in its rowcount internal variable (which is
    # effectively the same as running a SELECT COUNT(*) instead)

    if select_cursor.rowcount != 1:
        # If the last statement failed, try again with a wildcard character at the end of the customer_name
        customer_device_log.warning("Unable to get an unique result searching for a customer_name = {0} (got {1} results instead). Trying again using customer_name = {2}..."
                                    .format(str(customer_name), str(select_cursor.rowcount), str(customer_name + "%")))
        select_cursor = mysql_utils.run_sql_statement(select_cursor, sql_select, (str(customer_name + "%"),))

        if select_cursor.rowcount != 1:
            customer_device_log.warning("Unable to get an unique result searching for a customer_name = {0} (got {1} result instead). Trying again using customer_name = {2}..."
                                        .format(str(customer_name + "%"), str(select_cursor.rowcount), str("%" + customer_name)))
            select_cursor = mysql_utils.run_sql_statement(select_cursor, sql_select, (str("%" + customer_name),))

            if select_cursor.rowcount != 1:
                customer_device_log.warning("Unable to get an unique result searching for a customer_name = {0} (got {1} result instead). Trying again using customer_name = {2}..."
                                            .format(str("%" + customer_name), str(select_cursor.rowcount), str("%" + customer_name + "%")))
                select_cursor = mysql_utils.run_sql_statement(select_cursor, sql_select, (str("%" + customer_name + "%"),))

                if select_cursor.rowcount != 1:
                    # All search variants exhausted: close the database elements and raise instead of killing the whole process with exit()
                    error_msg = "The method was unable to retrieve a unique record for customer_name = {0} (got {1} results instead). Nowhere to go but out now..."\
                        .format(str("%" + customer_name + "%"), str(select_cursor.rowcount))
                    customer_device_log.error(error_msg)
                    select_cursor.close()
                    cnx.close()
                    raise mysql_utils.MySQLDatabaseException(message=error_msg)

    # If my select_cursor was able to go through the last flurry of validation, retrieve the result obtained
    result = select_cursor.fetchone()

    # The SQL SELECT result returns records as n-element tuples, n the number of columns returned. The SQL statement in this method queries for a single column: 'id', so any result returned should be a single element tuple
    customer_id = str(result[0])

    # I now have everything that I need to place a call to the remote API service. Build the service endpoint
    service_endpoint = "/api/customer/{0}/devices?".format(customer_id)

    url_strings = []
    if type:
        # Don't forget to escape the URL string characters into URL-compatible ones, replacing the '/' character with '%2F'
        url_strings.append("type=" + urllib.parse.quote(type.encode('UTF-8')).replace('/', '%2F'))
    if textSearch:
        url_strings.append("textSearch=" + urllib.parse.quote(textSearch.encode('UTF-8')).replace('/', '%2F'))
    if idOffset:
        url_strings.append("idOffset=" + urllib.parse.quote(idOffset.encode('UTF-8')).replace('/', '%2F'))
    if textOffset:
        url_strings.append("textOffset=" + urllib.parse.quote(textOffset.encode('UTF-8')).replace('/', '%2F'))
    url_strings.append("limit=" + str(limit))

    # Concatenate all the gathered url_strings together with the rest of the service_endpoint, using '&' as a separator
    service_endpoint += '&'.join(url_strings)

    # Get the request dictionary using a tenant_admin type authorization token
    service_dict = utils.build_service_calling_info(mac.get_auth_token(user_type='tenant_admin'), service_endpoint)

    # Query the remote API
    try:
        response = requests.get(url=service_dict['url'], headers=service_dict['headers'])
    except (requests.exceptions.ConnectionError, requests.exceptions.ConnectTimeout) as ce:
        error_msg = "Could not get a response from {0}...".format(str(service_dict['url']))
        customer_device_log.error(error_msg)
        raise utils.ServiceEndpointException(message=error_msg)

    # If I got a response, check first if it was the expected HTTP 200 OK
    if response.status_code != 200:
        error_msg = "Request unsuccessful: Received an HTTP " + str(eval(response.text)['status']) + " with message: " + str(eval(response.text)['message'])
        customer_device_log.error(error_msg)
        raise utils.ServiceEndpointException(message=error_msg)
    else:
        # It appears I got a valid result. Check if the number of results returned was truncated by the limit parameter. If so, warn the user only (there's no need to raise Exceptions on this matter)
        # Translate the results to Python-speak first before the comparison, given that this result set comes from the Postgres backend behind the remote API
        if eval(utils.translate_postgres_to_python(response.text))['hasNext']:
            customer_device_log.warning("Only {0} results returned. There are still results to return from the remote API side. Increase the 'limit' argument to obtain them.".format(str(limit)))

    # I'm good then. Return the result set back
    return response
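# A minimal usage sketch for the method above, assuming a reachable ThingsBoard instance and a populated customers table. The
# customer name and helper name are hypothetical; json.loads is used instead of eval for safer parsing of the response body.
def _example_customer_device_names(customer_name="Example Customer"):
    import json

    response = getCustomerDevices(customer_name=customer_name, limit=100)
    payload = json.loads(response.text)
    # Collect just the 'name' field of each returned device dictionary
    return [device['name'] for device in payload['data']]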
def retrieve_asset_id(asset_name):
    """
    This method receives the name of an asset and infers the associated id from it by consulting the respective database table.
    :param asset_name: (str) The name of the asset to retrieve from ambiosensing_thingsboard.tb_tenant_assets. This method can only implement the limited searching capabilities provided by the MySQL database. If these are not enough to retrieve a
    unique record for the asset name provided, the method 'fails' and returns None, even though a matching record might exist in the database under a different uppercase/lowercase combination than the one provided.
    :raise utils.InputValidationException: If the input argument fails the data type validation.
    :raise mysql_utils.MySQLDatabaseException: If any errors occur when accessing the database.
    :return asset_id: (str) If a unique match was found to the provided asset_name. None otherwise.
    """
    log = ambi_logger.get_logger(__name__)

    # Validate the input
    utils.validate_input_type(asset_name, str)

    # Prepare the database access elements
    database_name = user_config.access_info['mysql_database']['database']
    table_name = proj_config.mysql_db_tables['tenant_assets']

    cnx = mysql_utils.connect_db(database_name=database_name)
    select_cursor = cnx.cursor(buffered=True)

    sql_select = """SELECT entityType, id FROM """ + str(
        table_name) + """ WHERE name = %s;"""

    select_cursor = mysql_utils.run_sql_statement(cursor=select_cursor,
                                                  sql_statement=sql_select,
                                                  data_tuple=(asset_name, ))

    # Analyse the execution
    if select_cursor.rowcount == 0:
        log.warning(
            "No records returned from {0}.{1} using asset_name = {2}".format(
                str(database_name), str(table_name), str(asset_name)))
        select_cursor.close()
        cnx.close()
        return None
    elif select_cursor.rowcount > 1:
        log.warning(
            "{0}.{1} returned {2} records for asset_name = {3}. Cannot continue..."
            .format(str(database_name), str(table_name),
                    str(select_cursor.rowcount), str(asset_name)))
        select_cursor.close()
        cnx.close()
        return None
    else:
        # Got a single result back. Check if the entityType matches the expected one
        record = select_cursor.fetchone()
        if record[0] != 'ASSET':
            error_msg = "The record returned from {0}.{1} using asset_name = {2} has a wrong entityType. Got a {3}, expected an 'ASSET'".format(
                str(database_name), str(table_name), str(asset_name),
                str(record[0]))
            log.error(msg=error_msg)
            select_cursor.close()
            cnx.close()
            return None
        else:
            # All is well. Return the retrieved id after validation
            asset_id = record[1]
            utils.validate_id(entity_id=asset_id)

            # If the parameter returned survived the last battery of validations, all seems OK. Return the result
            select_cursor.close()
            cnx.close()
            return asset_id
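# A minimal round-trip sketch, assuming both lookups are backed by the same database table: resolve an asset name to its id and
# back again, returning the name stored in the database (or None if either lookup fails). The helper name and asset name are
# hypothetical.
def _example_asset_round_trip(asset_name="Example Asset"):
    asset_id = retrieve_asset_id(asset_name=asset_name)
    if asset_id is None:
        return None
    # retrieve_asset_name is defined earlier in this module
    return retrieve_asset_name(asset_id=asset_id)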