# Example #1
def get_device_types():
    """Fetch the set of device types supported by the remote ThingsBoard installation.

    Thin wrapper around the corresponding API controller call: executes the remote
    service, validates the HTTP status code and returns the decoded payload.
    :return: (dict) A dictionary with all the supported device types
    :raise utils.ServiceEndpointException: If the remote service replies with anything other than HTTP 200
    """
    device_types_log = ambi_logger.get_logger(__name__)

    # Delegate the actual HTTP exchange to the dedicated controller
    api_response = tb_device_controller.getDeviceTypes()

    # Anything other than a 200 means the call failed - log it and bail out
    if api_response.status_code != 200:
        # NOTE(review): eval() on remote response text is unsafe on untrusted input - consider ast.literal_eval
        error_msg = "Received a HTTP {0} with the message {1}".format(str(api_response.status_code), str(eval(api_response.text)['message']))
        device_types_log.error(msg=error_msg)
        raise utils.ServiceEndpointException(message=error_msg)

    # Normalize the PostgreSQL-flavored literals (true/false/null) into Python ones and cast the text into a dict
    return eval(utils.translate_postgres_to_python(api_response.text))
def update_tenant_assets_table():
    """Populate the database table with all the ASSETs belonging to a given tenant.

    The remote API call behind this method uses a 'customer_user' credential pair, yet
    it returns the assets of the Tenant associated to that Customer (calling it with
    'tenant_admin' credentials yields an HTTP 403), so the relation to a Tenant is
    indirect. All configured assets appear to be returned in a single call, so this
    method just flattens each record and forwards it to the table updater.
    :raise mysql_utils.MySQLDatabaseException: For errors with the database operation
    :raise utils.ServiceEndpointException: For errors with the remote API service execution
    :raise utils.AuthenticationException: For errors related with the authentication credentials used.
    """
    # Key into proj_config.mysql_db_tables that resolves to the actual table name
    table_key = 'tenant_assets'
    # Generous limit so the remote API returns everything in one go
    result_limit = 50

    # Place the remote API call
    api_response = tb_asset_controller.getTenantAssets(limit=result_limit)

    # Replace the PostgreSQL-speak in the response text and cast it into a dictionary
    assets_dict = eval(utils.translate_postgres_to_python(api_response.text))

    # The interesting part lives under the 'data' key: one dictionary per asset
    for asset_record in assets_dict['data']:
        # Collapse the 'tenantId' sub-dictionary into just its 'id' string: the key already
        # implies a TENANT entityType, so the sub-dictionary's entityType is redundant
        asset_record['tenantId'] = asset_record['tenantId']['id']

        # Same treatment for the 'customerId' sub-dictionary
        asset_record['customerId'] = asset_record['customerId']['id']

        # Flatten any remaining nested dictionaries into a single-level one
        asset_record = utils.extract_all_key_value_pairs_from_dictionary(input_dictionary=asset_record)

        # Swap POSIX timestamps for MySQL DATETIME-friendly objects, when the key is present
        if 'createdTime' in asset_record:
            asset_record['createdTime'] = mysql_utils.convert_timestamp_tb_to_datetime(timestamp=asset_record['createdTime'])

        # Hand the flattened record over to the database updater
        database_table_updater.add_table_data(asset_record, proj_config.mysql_db_tables[table_key])
def update_tenants_table():
    """Synchronize the tenants database table with the remote ThingsBoard API.

    Database-side counterpart of tenant_controller.getTenants(): fetches the current
    tenant data from the remote API and delegates each record to the table updater,
    whose internal logic decides between INSERT and UPDATE based on what is already
    stored. Warns (via logger) if the configured limit clipped the returned results.
    """
    # Key into proj_config.mysql_db_tables for the tenants table name
    table_key = 'tenants'
    # High limit so that, ideally, a single call returns every tenant
    result_limit = 50

    # Fetch the raw response and convert its PostgreSQL-flavored text into a Python dict
    api_response = tb_tenant_controller.getTenants(limit=result_limit)
    tenants_dict = eval(utils.translate_postgres_to_python(api_response.text))

    # If the remote side still has records to return, tell the user to raise the limit.
    # The logger is only needed inside this clause, hence its local creation here.
    if tenants_dict['hasNext']:
        update_tenants_log = ambi_logger.get_logger(__name__)
        update_tenants_log.warning("Not all results from the remote API were returned on the last call (limit = {0}). Raise the limit parameter to retrieve more".format(str(result_limit)))

    # Process each tenant record under the 'data' key, one by one
    for tenant_record in tenants_dict['data']:
        # Flatten any sub-level dictionaries into a single-level structure
        tenant_record = utils.extract_all_key_value_pairs_from_dictionary(input_dictionary=tenant_record)

        # Replace the POSIX timestamp with a MySQL-friendly DATETIME, when present
        if 'createdTime' in tenant_record:
            tenant_record['createdTime'] = mysql_utils.convert_timestamp_tb_to_datetime(timestamp=tenant_record['createdTime'])

        # The table updater decides internally whether this is an INSERT or an UPDATE
        database_table_updater.add_table_data(tenant_record, proj_config.mysql_db_tables[table_key])
def update_customer_table():
    """Synchronize the customers database table with the remote ThingsBoard API.

    Retrieves the latest customer data from the remote API and hands each record to
    the table updater, which decides per record whether to insert, update or do
    nothing (when the stored and returned records are identical).
    """
    # Key into proj_config.mysql_db_tables for the customers table name
    table_key = 'customers'
    result_limit = 50

    # Place the remote API call
    api_response = tb_customer_controller.getCustomers(limit=result_limit)

    # Translate the PostgreSQL-speak in the response text and cast it into a dictionary
    customers_dict = eval(utils.translate_postgres_to_python(api_response.text))

    # Process each customer record found under the 'data' key
    for customer_record in customers_dict['data']:
        # The customers table maps almost one-to-one onto the returned keys, except for
        # the 'tenantId' sub-dictionary ({'entityType': ..., 'id': ...}): the table has a
        # single 'tenantId' column that takes just the 'id' string, so collapse it here
        # or the insert further down the line would fail
        customer_record['tenantId'] = customer_record['tenantId']['id']

        # Flatten whatever nested dictionaries remain in the record
        customer_record = utils.extract_all_key_value_pairs_from_dictionary(input_dictionary=customer_record)

        # The API returns 'createdTime' as a POSIX timestamp; convert it when present
        if 'createdTime' in customer_record:
            customer_record['createdTime'] = mysql_utils.convert_timestamp_tb_to_datetime(timestamp=customer_record['createdTime'])

        # Forward the flattened record to the customers table updater
        database_table_updater.add_table_data(data_dict=customer_record, table_name=proj_config.mysql_db_tables[table_key])
def update_asset_devices_table():
    """Rebuild the asset-devices table that maps DEVICEs to the ASSETs they relate to.

    ASSETs represent spaces and the ThingsBoard relation property associates DEVICEs
    to those assets, representing the devices installed in / monitoring that space.
    The method resets the asset_devices table, refreshes the assets and devices
    tables, and then, for each known asset, queries the remote entity-relation
    service and writes one row per returned relation.
    :raise mysql_utils.MySQLDatabaseException: For problems related with the database access
    :raise utils.ServiceEndpointException: For issues related with the remote API call
    :raise utils.AuthenticationException: For problems related with the authentication credentials used
    """
    asset_devices_log = ambi_logger.get_logger(__name__)

    asset_devices_table_name = proj_config.mysql_db_tables['asset_devices']
    assets_table_name = proj_config.mysql_db_tables['tenant_assets']
    devices_table_name = proj_config.mysql_db_tables['devices']

    # Fire up the database access objects. Two cursors are needed: the outer one keeps
    # the asset result set alive while the inner one runs per-relation lookups
    database_name = user_config.access_info['mysql_database']['database']
    cnx = mysql_utils.connect_db(database_name=database_name)
    outer_select_cursor = cnx.cursor(buffered=True)
    inner_select_cursor = cnx.cursor(buffered=True)

    # Start from a clean slate: this method rebuilds the whole table
    mysql_utils.reset_table(table_name=asset_devices_table_name)

    # Refresh the assets table first, then the devices table - both are read below
    mysql_asset_controller.update_tenant_assets_table()
    mysql_device_controller.update_devices_table()

    # Grab all asset ids, names and types (names/types feed the 'from*' columns later)
    sql_select = """SELECT id, name, type FROM """ + str(
        assets_table_name) + """;"""

    # Execute the statement with the outer select cursor
    outer_select_cursor = mysql_utils.run_sql_statement(
        cursor=outer_select_cursor, sql_statement=sql_select, data_tuple=())

    # No assets means there is nothing to relate - close everything and raise
    if outer_select_cursor.rowcount <= 0:
        error_msg = "Unable to get any results from {0}.{1} with '{2}'...".format(
            str(database_name), str(assets_table_name),
            str(outer_select_cursor.statement))
        asset_devices_log.error(error_msg)
        outer_select_cursor.close()
        inner_select_cursor.close()
        cnx.close()
        raise mysql_utils.MySQLDatabaseException(message=error_msg)

    # Got some results. Process them then
    else:
        # For each valid ASSET id found, query the ThingsBoard side for all entities
        # that have a relation to that asset and send the result to the database
        asset_info = outer_select_cursor.fetchone()

        # Common parameters for every entity-relation call
        entityType = "ASSET"
        relationTypeGroup = "COMMON"
        direction = "FROM"

        # Run this while there are still asset ids to process
        while asset_info:

            # Query for entities related to the current asset
            api_response = tb_entity_relation_controller.findByQuery(
                entityType=entityType,
                entityId=asset_info[0],
                relationTypeGroup=relationTypeGroup,
                direction=direction)

            # Replace the non-Python terms in the response and cast it into a list
            relation_list = eval(
                utils.translate_postgres_to_python(api_response.text))

            # Format each relation and send it to the database
            for relation in relation_list:
                # Accumulate the row data in a dictionary; 'toName'/'toType' start as
                # None and are only filled in if the lookup below finds the entity
                data_dict = {
                    "fromEntityType": relation["from"]["entityType"],
                    "fromId": relation["from"]["id"],
                    "fromName": asset_info[1],
                    "fromType": asset_info[2],
                    "toEntityType": relation["to"]["entityType"],
                    "toId": relation["to"]["id"],
                    "toName": None,
                    "toType": None,
                    "relationType": relation["type"],
                    "relationGroup": relation["typeGroup"],
                }

                # Take care with the 'description'/'additionalInfo' issue: the field may
                # be absent from the 'additionalInfo' sub-dictionary, or the whole
                # sub-dictionary may be None
                if relation["additionalInfo"] is not None:
                    try:
                        data_dict["description"] = relation["additionalInfo"][
                            "description"]
                    except KeyError:
                        # Field wasn't set - default it instead of crashing
                        data_dict["description"] = None
                else:
                    data_dict["description"] = None

                # Resolve the related entity's name/type from the matching local table.
                # BUG FIX: previously, an entity type other than DEVICE/ASSET left
                # sql_select holding a stale statement (the outer assets SELECT or a
                # previous iteration's lookup), which was then executed with a
                # one-element data tuple. Now unknown types simply skip the lookup.
                if data_dict['toEntityType'] == 'DEVICE':
                    lookup_sql = """SELECT name, type FROM """ + str(
                        devices_table_name) + """ WHERE id = %s;"""
                elif data_dict['toEntityType'] == 'ASSET':
                    lookup_sql = """SELECT name, type FROM """ + str(
                        assets_table_name) + """ WHERE id = %s;"""
                else:
                    lookup_sql = None

                if lookup_sql is not None:
                    data_tuple = (relation['to']['id'], )

                    # NOTE: the inner cursor is used because the outer one still holds
                    # unprocessed rows from the assets SELECT; reusing it would drop them
                    inner_select_cursor = mysql_utils.run_sql_statement(
                        cursor=inner_select_cursor,
                        sql_statement=lookup_sql,
                        data_tuple=data_tuple)

                    # Use the result when found; otherwise 'toName'/'toType' stay None.
                    # No point raising here just because the related entity is unknown
                    if inner_select_cursor.rowcount > 0:
                        result = inner_select_cursor.fetchone()
                        data_dict["toName"] = result[0]
                        data_dict["toType"] = result[1]

                # Write the relation row in the database table
                mysql_utils.add_data_to_table(
                    table_name=asset_devices_table_name, data_dict=data_dict)

            # Finished with the current asset. Fetch the next one, if any
            asset_info = outer_select_cursor.fetchone()

        # Done with everything. Close the database access objects and carry on
        outer_select_cursor.close()
        inner_select_cursor.close()
        cnx.close()
def getAttributes(entityType=None, entityId=None, deviceName=None, keys=None):
    """
    GET method to retrieve all server-type attributes configured for the device identified by the pair
    entityType/entityId or deviceName provided. This method requires either the entityType/entityId pair or the
    deviceName to be provided to execute. If insufficient data is provided, the relevant Exception shall be raised.
    :param entityType: (str) The entity type of the object whose attributes are to be retrieved.
    :param entityId: (str) The id string that identifies the device whose attributes are to be retrieved.
    :param deviceName: (str) The name of the device that can be used to retrieve the entityType/entityId.
    :param keys: (list of str) Each attribute returned is a key-value pair. Use this argument to provide a key based
    filter, i.e., if this list is set, only attributes whose keys match any of the list elements are to be returned.
    :raise utils.InputValidationException: If any of the inputs has the wrong data type or the method doesn't have the necessary data to execute.
    :raise utils.ServiceEndpointException: If problems occur when accessing the remote API.
    :return attribute_dictionary: (dict) A dictionary with the retrieved attributes in the following format:
        attribute_dictionary =
        {
            'attribute_1_key': 'attribute_1_value',
            ...
            'attribute_N_key': 'attribute_N_value'
        }
        where the keys are the ontology-specific names (official names) and the respective values are the timeseries
        keys being measured by the device that map straight into those ontology names.
        If the device identified by the argument data does exist but doesn't have any attributes configured,
        this method returns None instead.
    """
    log = ambi_logger.get_logger(__name__)

    # Validate inputs
    if entityId:
        utils.validate_id(entity_id=entityId)

        # An entityId alone is useless: either the entityType or, at least, the deviceName must also be provided
        if not entityType and not deviceName:
            error_msg = "A valid entityId was provided but no entityType nor deviceName were added. Cannot execute this method until a valid entityType/entityId or a valid deviceName is provided!"
            log.error(error_msg)
            raise utils.InputValidationException(message=error_msg)

    if entityType:
        utils.validate_entity_type(entity_type=entityType)

        # Again, the method can only move forward if a corresponding entityId or deviceName was also provided
        if not entityId and not deviceName:
            error_msg = "A valid entityType was provided but no corresponding entityId nor deviceName. Cannot continue until a valid entityType/entityId or a valid deviceName is provided!"
            log.error(error_msg)
            raise utils.InputValidationException(message=error_msg)

    if deviceName:
        utils.validate_input_type(deviceName, str)

    if keys:
        utils.validate_input_type(keys, list)
        for key in keys:
            utils.validate_input_type(key, str)

    # At this point there is either a valid entityId/entityType pair or a deviceName.
    # If only the deviceName was provided, resolve the entityId/entityType from it
    if deviceName and (not entityType or not entityId):
        device_data = mysql_device_controller.get_device_credentials(
            device_name=deviceName)

        # A None result means the device is not (yet) in the device table, or the table needs updating
        if not device_data:
            error_msg = "Cannot retrieve a pair of entityId/entityType from the device name provided: {0}. Either:" \
                        "\n1. The device is not yet configured in the database/ThingsBoard platform" \
                        "\n2. The MySQL device table needs to be updated." \
                        "\nCannot continue for now".format(str(deviceName))
            log.error(error_msg)
            raise utils.InputValidationException(message=error_msg)

        # The lookup returns a (entityType, entityId, timeseriesKeys) tuple. Grab the relevant data from it
        entityType = device_data[0]
        entityId = device_data[1]

    # Validation complete. Build the endpoint for the remote call
    service_endpoint = "/api/plugins/telemetry/{0}/{1}/values/attributes".format(
        str(entityType), str(entityId))

    # If a list of keys was provided, append them as a single comma-separated (no spaces) query parameter
    if keys:
        service_endpoint += "?keys=" + ",".join(keys)

    # Build the service dictionary from the endpoint just built
    service_dict = utils.build_service_calling_info(
        mac.get_auth_token(user_type='tenant_admin'),
        service_endpoint=service_endpoint)

    # Query the remote API
    try:
        response = requests.get(url=service_dict['url'],
                                headers=service_dict['headers'])
    except (requests.exceptions.ConnectionError,
            requests.exceptions.ConnectTimeout):
        error_msg = "Could not get a response from {0}...".format(
            str(service_dict['url']))
        log.error(error_msg)
        # BUG FIX: was raising with message=ce (the raw exception object) while the
        # built error_msg went unused; raise with the logged message instead
        raise utils.ServiceEndpointException(message=error_msg)

    # A response came back - check the HTTP return code
    if response.status_code != 200:
        error_msg = "Request not successful: Received an HTTP " + str(
            eval(response.text)['status']) + " with message: " + str(
                eval(response.text)['message'])
        log.error(error_msg)
        raise utils.ServiceEndpointException(message=error_msg)
    else:
        # Got a valid result. Translate and decode the returned payload
        data_to_return = eval(utils.translate_postgres_to_python(
            response.text))

        # BUG FIX: was 'len(data_to_return) is 0' - an identity comparison against an
        # int literal, which is implementation-dependent. Use truthiness instead
        if not data_to_return:
            # Nothing to return then. Send back a None instead
            return None

        # The payload is a list of {'lastUpdateTs': int, 'key': str, 'value': str}
        # dictionaries; fold it into the flat {key: value} structure documented above
        attribute_dictionary = {}
        for attribute_pair in data_to_return:
            # Filter out any attribute whose value is not part of the official ontology name list
            if attribute_pair['value'] not in proj_config.ontology_names:
                continue
            attribute_dictionary[
                attribute_pair['key']] = attribute_pair['value']

        # All done. Return the attributes dictionary
        return attribute_dictionary
def getTenants(textSearch=None, idOffset=None, textOffset=None, limit=10):
    """GET method to retrieve either all tenants registered in the thingsboard server or a specific tenant by providing the related search terms.
    :param textSearch: OPTIONAL (str) A text search string to limit the number of tenants returned. Quite limited: it only searches the title field and only returns results on an EXACT match.
    Eg. textSearch='Mr Ricardo Almeida' returns that tenant but textSearch='Ricardo Almeida' returns nothing even though this string matches the 'name' field.
    :param idOffset: OPTIONAL (str) A search pattern for just the 'id' field (using the tenant's id in textSearch yields no results). A tenant id has a fixed [8]-[4]-[4]-[4]-[8] block format;
    anything but the first four blocks, including the last '-', makes the remote API return an HTTP 400 - Invalid UUID string, while one extra character after the last '-' returns all tenants.
    :param textOffset: OPTIONAL (any) Purpose unknown: matching and un-matching strings, ints, floats, etc. all return every tenant.
    :param limit: (int) The only required field in this method. Limits the number of results to return.
    :raise utils.InputValidationException: If any argument fails type validation or limit is not a positive integer.
    :raise utils.ServiceEndpointException: If the remote API returns anything other than HTTP 200.
    :return response: The requests response object. Its text field decodes (after translation) to:
    tenant_data = {
        'data': [ {tenant_data_1}, ... {tenant_data_n} ],
        'nextPageLink': nextPageData,
        'hasNext': bool
    }
    Each item of the 'data' list has the format:
    tenant_data = {
        'id': {'entityType': str, 'id': str},
        'createdTime': int (POSIX timestamp),
        'description': str, 'country': str, 'state': str, 'city': str,
        'address': str, 'address2': str, 'zip': str, 'phone': str,
        'email': str, 'title': str, 'region': str, 'name': str
    }
    'nextPageLink' is None when every tenant was returned, or, when the limit clipped the results:
    'nextPageLink': {'limit': int, 'textSearch': str, 'textSearchBound': str, 'textOffset': str, 'idOffset': str}
    'hasNext' is False when the limit was high enough, True when there are still results left to return.
    """

    # Fetch a local logger for this method only
    tenant_log = ambi_logger.get_logger(__name__)

    # Validate inputs
    try:
        # Validate the mandatory input first
        utils.validate_input_type(limit, int)
        # BUG FIX: these were chained with elif, so providing textSearch skipped the
        # validation of idOffset/textOffset (and idOffset skipped textOffset's).
        # Each optional argument must be validated independently when provided
        if textSearch:
            utils.validate_input_type(textSearch, str)
        if idOffset:
            utils.validate_input_type(idOffset, str)
        if textOffset:
            utils.validate_input_type(textOffset, str)
    except utils.InputValidationException as ive:
        tenant_log.error(ive.message)
        raise ive

    # The limit needs extra validation: limit=0 also triggers an error from the remote
    # API, so catching it here avoids dealing with that error later on
    if limit <= 0:
        error_msg = "Invalid limit provided: {0}. Please provide a positive, greater than zero limit value!".format(
            str(limit))
        tenant_log.error(error_msg)
        raise utils.InputValidationException(message=error_msg)

    service_endpoint = "/api/tenants?"

    # The arguments are passed in standard URL query format, so every value must be
    # properly escaped to the URL charset before being appended to the endpoint

    url_strings = []
    if textSearch:
        # Encode to UTF-8 first, then escape to the URL charset. NOTE: urllib.parse.quote
        # escapes everything problematic except '/', which should become '%2F' when it
        # appears inside an argument - hence the explicit replace after quoting
        url_textSearch = "textSearch=" + urllib.parse.quote(
            textSearch.encode('UTF-8')).replace('/', '%2F')
        url_strings.append(url_textSearch)

    if idOffset:
        url_idOffset = "idOffset=" + urllib.parse.quote(
            idOffset.encode('UTF-8')).replace('/', '%2F')
        url_strings.append(url_idOffset)

    if textOffset:
        url_textOffset = "textOffset=" + urllib.parse.quote(
            textOffset.encode('UTF-8')).replace('/', '%2F')
        url_strings.append(url_textOffset)

    # Add the mandatory parameter to the list as is
    url_strings.append("limit=" + str(limit))
    service_endpoint += '&'.join(url_strings)

    service_dict = utils.build_service_calling_info(
        mac.get_auth_token(user_type='sys_admin'), service_endpoint)
    try:
        response = requests.get(url=service_dict['url'],
                                headers=service_dict['headers'])
    except (requests.exceptions.ConnectionError,
            requests.exceptions.ConnectTimeout) as ce:
        error_msg = "Could not get a response from {0}..".format(
            str(service_dict['url']))
        tenant_log.error(error_msg)
        raise ce

    # Only an HTTP 200 allows the method to continue; anything else shuts it down
    if response.status_code != 200:
        # The error details come back as a dict encoded in a str - decode it once
        # instead of eval'ing the response text three separate times
        error_details = eval(response.text)
        error_msg = "Received an HTTP " + str(
            error_details['status']) + " with the message: " + str(
                error_details['message'])
        tenant_log.error(error_msg)
        raise utils.ServiceEndpointException(
            message=error_msg,
            error_code=int(error_details['errorCode']))
    else:
        # Check the 'hasNext' key in the translated response and warn the user when the
        # limit argument left records still on the API side
        if eval(utils.translate_postgres_to_python(response.text))['hasNext']:
            tenant_log.warning(
                "There are still more results to return from the API side. Increase the 'limit' argument value to obtain them."
            )

        # I'm done. Send back the response data
        return response
def getTenantAssets(type=None, textSearch=None, idOffset=None, textOffset=None, limit=10):
    """Standard method to retrieve all ASSETs currently in the ThingsBoard installation database (regardless of which database is implemented). As with all services of this type so far, this is the ThingsBoard side of the whole process:
    it places a GET request in the expected format to the ThingsBoard '/api/tenant/assets' API endpoint and returns the raw HTTP response object back to the caller.
    @:param type (str): OPTIONAL Use this argument to filter the results for a specific asset type (e.g. 'building', 'room', 'floor', etc...). This is a free text field on the ThingsBoard side, which means any string can be set in this
    field. If you know a priori which exact ASSET type you're interested in, use this argument to narrow down the results to be returned. NOTE: this parameter name shadows the 'type' builtin, but it is kept as-is because it is part of the
    public interface of this method.
    @:param textSearch (str): OPTIONAL Use this argument to narrow down the returned results based on the 'name' field of the ASSET description. As with similar methods, this field is quite limited: unless an exact match is found between
    the provided textSearch argument and the contents of the 'name' field, no filtering actually takes place.
    @:param idOffset (str): OPTIONAL Analogous field to the previous one but applied to the 'id' field. The filtering abilities of this argument are also quite limited. Check similar methods that use this argument for more detailed
    descriptions.
    @:param textOffset (str): OPTIONAL So far, still no idea what this does. Other than determining that it only accepts strings, its actual purpose remains unknown.
    @:param limit (int): Use this field to limit the number of results returned from this service. If the argument in this field prevents the full scope of results from being returned, the response carries a 'nextPageLink' structure and a
    'hasNext' flag set to true. In this event, the method warns the caller that there are results left to return, but in the end it is up to the caller to specify a higher limit value to obtain them.
    @:raise utils.InputValidationException: For invalid input arguments (wrong types or a non-positive limit).
    @:raise utils.ServiceEndpointException: For errors reported back by the remote API.
    @:return response: an HTTP response object whose 'text' field carries a dictionary with a 'data' key holding the list of ASSET dictionaries (each with 'id' {'entityType', 'id'}, 'createdTime', 'additionalInfo' {'description'},
    'tenantId' {'entityType', 'id'}, 'customerId' {'entityType', 'id'}, 'name' and 'type' elements), plus the 'nextPageLink' and 'hasNext' pagination elements.
    """
    asset_control_log = ambi_logger.get_logger(__name__)

    # Validate the mandatory input first
    utils.validate_input_type(limit, int)

    # And the OPTIONAL ones, but only when they were actually provided
    for optional_arg in (type, textSearch, idOffset, textOffset):
        if optional_arg:
            utils.validate_input_type(optional_arg, str)

    # The remote service only accepts strictly positive limits
    if limit <= 0:
        error_msg = "Invalid limit provided: {0}. Please provide a positive, greater than zero limit value!".format(str(limit))
        asset_control_log.error(error_msg)
        raise utils.InputValidationException(message=error_msg)

    # Setup the base endpoint
    service_endpoint = "/api/tenant/assets?"

    # Build the query string only from the arguments that were actually provided, URL-quoting each value
    # ('/' is a 'safe' character for urllib.parse.quote by default, hence the explicit replace afterwards)
    url_strings = []
    for param_name, param_value in (("type", type), ("textSearch", textSearch), ("idOffset", idOffset), ("textOffset", textOffset)):
        if param_value:
            url_strings.append(param_name + "=" + urllib.parse.quote(param_value.encode('UTF-8')).replace('/', '%2F'))

    url_strings.append("limit=" + str(limit))

    # Concatenate the elements in the url_strings list into a single url string
    service_endpoint += '&'.join(url_strings)

    # Get the standard dictionary to call the remote service. It appears that different installations require different sets of user credentials... still trying to figure out what the hell is going on with this one
    service_dict = utils.build_service_calling_info(mac.get_auth_token('tenant_admin'), service_endpoint=service_endpoint)

    # And try to get a response from the remote API
    try:
        response = requests.get(url=service_dict['url'], headers=service_dict['headers'])
    except (requests.exceptions.ConnectionError, requests.exceptions.ConnectTimeout) as ce:
        error_msg = "Could not get a response from {0}...".format(str(service_dict['url']))
        asset_control_log.error(error_msg)
        raise ce

    # Check the HTTP response code first
    if response.status_code != 200:
        # NOTE(security): eval() on a remote API body executes arbitrary expressions. The body is JSON, so json.loads()
        # would be safer; kept as eval for consistency with the rest of this module, but parsed only ONCE now instead of twice.
        error_details = eval(response.text)
        error_msg = "Request unsuccessful: Received an HTTP " + str(error_details['status']) + " with message: " + str(error_details['message'])
        asset_control_log.error(error_msg)
        raise utils.ServiceEndpointException(message=error_msg)
    else:
        # Check if the 'hasNext' flag is set, i.e., if there are still results to return from the ThingsBoard side of things. The result structure has that flag set to either 'true' or 'false', which are not recognized as proper
        # boolean values by Python (those are boolean natives from Postgres/Cassandra). As such, the returned text needs to be 'translated' to Python-esque first using the method built for that purpose
        if eval(utils.translate_postgres_to_python(response.text))['hasNext']:
            asset_control_log.warning("Only {0} results returned. There are still more results to return from the remote API side. Increase the 'limit' argument to obtain them.".format(str(limit)))

        # But return the response nonetheless
        return response
def getCustomers(textSearch=None, idOffset=None, textOffset=None, limit=10):
    """One of the simplest GET methods in the customer-controller section of the ThingsBoard API. Only a valid limit number needs to be provided to request the list of all customers registered in the platform so far, so that the local
    MySQL database table can then be populated with them.
    All parameters initialized to None in the method signature are OPTIONAL (textSearch, idOffset and textOffset). Those set to specific values and data types are MANDATORY (limit).
    @:type user_types allowed for this service: CUSTOMER_USER
    @:param textSearch (str) - Use this field to narrow down results based only on the 'name' field (which in this case should be the same as 'title', though the textSearch field only goes to the former). In order to yield any results,
    the textSearch field has to be exactly equal to whatever is in the 'name' field in the remote API (case sensitive).
    @:param idOffset (str) - A similar field to the previous one, but its search scope is limited to the 'id' fields. It provides a bit more flexibility than the last one - id strings can be inserted with, at most, 11 of their last
    characters omitted and meaningful results are still returned. Any id string smaller than that results in 'Invalid UUID string' errors.
    @:param textOffset (str) - Purpose still unknown: extensive testing (including via the ThingsBoard Swagger application) has not revealed any observable effect of this parameter on the returned results.
    @:param limit (int) - Use this field to limit the number of results returned, regardless of other limiters around. If the limit field truncated the set of returned results, the result dictionary is returned with its 'nextPageLink' key
    set to another dictionary describing just that and the 'hasNext' key set to True. Otherwise, if all records were returned, 'nextPageLink' is set to NULL and 'hasNext' is returned set to False.
    @:raise utils.InputValidationException - For errors during the validation of inputs
    @:raise utils.ServiceEndpointException - For errors occurring during the interface with the remote API
    @:return an HTTP response object whose 'text' field carries the dictionary {"data": [customer_data_1, ..., customer_data_n], "nextPageLink": null or dict, "hasNext": bool}, where each customer_data sub-dictionary contains the
    customer's 'id' {'entityType', 'id'}, 'createdTime', 'additionalInfo' (null or {'description'}), address/contact fields ('country', 'state', 'city', 'address', 'address2', 'zip', 'phone', 'email'), 'title',
    'tenantId' {'entityType', 'id'} and 'name' elements.
    """
    customer_log = ambi_logger.get_logger(__name__)

    # Validate inputs: 'limit' is MANDATORY, the remaining arguments only when provided
    try:
        utils.validate_input_type(limit, int)
        if textSearch:
            utils.validate_input_type(textSearch, str)
        if idOffset:
            utils.validate_input_type(idOffset, str)
        if textOffset:
            utils.validate_input_type(textOffset, str)
    except utils.InputValidationException as ive:
        customer_log.error(ive.message)
        raise ive

    # The remote service only accepts strictly positive limits
    if limit <= 0:
        error_msg = "Invalid limit provided: {0}. Please provide a value greater than zero for the limit value!".format(
            str(limit))
        customer_log.error(error_msg)
        raise utils.InputValidationException(message=error_msg)

    service_endpoint = "/api/customers?"

    # Build the query string only from the arguments that were actually provided, URL-quoting each value
    # ('/' is a 'safe' character for urllib.parse.quote by default, hence the explicit replace afterwards)
    url_strings = []
    if textSearch:
        textSearch = urllib.parse.quote(textSearch.encode('UTF-8')).replace(
            '/', '%2F')
        url_strings.append("textSearch=" + str(textSearch))
    if idOffset:
        idOffset = urllib.parse.quote(idOffset.encode('UTF-8')).replace(
            '/', '%2F')
        url_strings.append("idOffset=" + str(idOffset))
    if textOffset:
        textOffset = urllib.parse.quote(textOffset.encode('UTF-8')).replace(
            '/', '%2F')
        url_strings.append("textOffset=" + str(textOffset))
    url_strings.append("limit=" + str(limit))

    # Create the endpoint request string
    service_endpoint += '&'.join(url_strings)

    # Place the HTTP GET request using a REGULAR type authorization token
    service_dict = utils.build_service_calling_info(
        mac.get_auth_token(user_type='tenant_admin'), service_endpoint)

    # Query the remote API
    try:
        response = requests.get(url=service_dict['url'],
                                headers=service_dict['headers'])
    except (requests.exceptions.ConnectionError,
            requests.exceptions.ConnectTimeout) as ce:
        # BUGFIX: the original message read "Could not get a request from ...", inverting the direction of the failure;
        # fixed to "response" for correctness and consistency with the sibling methods in this module
        error_msg = "Could not get a response from {0}...".format(
            str(service_dict['url']))
        customer_log.error(error_msg)
        raise ce

    # Check the status code of the HTTP response before moving forward
    if response.status_code != 200:
        # NOTE(security): eval() on a remote API body executes arbitrary expressions. The body is JSON, so json.loads()
        # would be safer; kept as eval for consistency with the rest of this module, but parsed only ONCE now instead of twice.
        error_details = eval(response.text)
        error_msg = "Request unsuccessful: Received an HTTP {0} with message {1}.".format(
            str(error_details['status']),
            str(error_details['message']))
        customer_log.error(error_msg)
        raise utils.ServiceEndpointException(message=error_msg)
    else:
        # Check the status of the 'hasNext' parameter returned. It comes back as a Postgres-style boolean
        # ('true'/'false'), hence the translation step before the eval
        if eval(utils.translate_postgres_to_python(response.text))['hasNext']:
            customer_log.warning(
                "Only {0} results returned. There are still more results to return from the remote API side. Increase the 'limit' argument to obtain them."
                .format(str(limit)))

        return response
def get_auth_token(user_type: str) -> str:
    """This is going to be the go-to method in this module. This method receives one of the supported user types ('SYS_ADMIN', 'TENANT_ADMIN' or 'CUSTOMER_USER') and fetches the respective authorization token. What this method does to get it is
    abstracted from the user. This method automatically checks the usual suspects first: database table. If there's any token in there for the provided user type, it then tests it to see if it is still valid. If not, it then tries to use the
    refresh token to issue a valid one and, if that is also not possible, request a new pair of authentication and refresh tokens.
    This method should be integrated into basic service calls to save the user to deal with the whole authorization token logistics
    @:param user_type (str) - One of the following supported user types: sys_admin, tenant_admin, customer_user (the case type of this argument is irrelevant because I will take care of it later on)
    @:raise utils.InputValidationException - If an invalid argument is provided
    @:raise utils.AuthenticationException - If the authentication credentials are not correct
    @:raise utils.ServiceEndpointException - If the call to the remote service fails
    @:raise mysql_utils.MySQLDatabaseException - If problems arise when dealing with the database
    @:return token (str) - A valid authorization token that can be used to authenticate a remote service call"""

    auth_token_log = ambi_logger.get_logger(__name__)

    # Validate the input as a data type and as one of the expected user types
    utils.validate_input_type(user_type, str)

    # Set the user type string to all lower case characters to simplify comparisons from this point on
    user_type = user_type.lower()
    supported_user_types = ['sys_admin', 'tenant_admin', 'customer_user']

    if user_type not in supported_user_types:
        raise utils.InputValidationException(
            "Invalid user type provided: '{0}'. Please provided one of these: {1}"
            .format(str(user_type), str(supported_user_types)))

    # All seems good so far. Lets check the database first
    database_name = user_config.mysql_db_access['database']
    table_name = proj_config.mysql_db_tables['authentication']

    # Two cursors over the same connection: 'select_cursor' is used exclusively for SELECTs,
    # 'change_cursor' for the UPDATE/INSERT statements issued further down
    cnx = mysql_utils.connect_db(database_name=database_name)
    select_cursor = cnx.cursor(buffered=True)
    change_cursor = cnx.cursor(buffered=True)

    # Grab the full column list from the database for indexing purposes
    column_list = mysql_utils.get_table_columns(database_name=database_name,
                                                table_name=table_name)

    # Lets see if there's any token already in the database
    sql_select = """SELECT token, refreshToken FROM """ + str(
        table_name) + """ WHERE user_type = %s;"""

    select_cursor = mysql_utils.run_sql_statement(select_cursor, sql_select,
                                                  (user_type, ))

    # Check if any results came back
    if select_cursor.rowcount > 0:
        # I got a valid token pair back. Extract the authorization from it
        auth_token = select_cursor.fetchone()[0]

        # And call the getUser service providing the retrieved token to see if a) the token is still valid and b) the user_type provided matches what the remote API sends back
        token_status_response = tb_auth_controller.getUser(
            auth_token=auth_token)

        # And convert the response body into the expected dictionary for easier access after this point.
        # NOTE: Interesting development in this case: it turns out that I can authorize a user using a authorization token that was issued from a different ThingsBoard installation!!! In other words, I can get a authorization token issued from my
        # local ThingsBoard installation and use it to get a "valid" authentication in the remote ThingsBoard installation. When I say "valid" I mean, the interface accepts the token without any kind of feedback regarding its actual validity. Yet,
        # when I execute a service with it, guess what? I do get a HTTP 200 response but without any data!! This doesn't make any sense and is going to complicate my code a lot! So I need to deal with this retarded cases too...
        # Attempt to do the following only if something was returned back in the text parameter of the response
        token_status_dict = None

        if token_status_response.text != "":
            token_status_dict = eval(
                utils.translate_postgres_to_python(token_status_response.text))

        # This particular annoying case in which a valid authorization token from a different installation is used in this case. In this case, the installation accepts the token, since it has the expected format, but internally it gets rejected
        # because the credential pair that originated it obviously doesn't match! But somehow the API fails to mention this! Instead, the damn thing accepts the token and even returns HTTP 200 responses to my requests but these come back all
        # empty, presumably because the internal authentication failed... because the tokens are wrong. Gee, what an unnecessary mess... If a case such as that is detected, simply get a new pair of tokens back. Most of these cases are solved by
        # forcing a token refresh
        if token_status_response.status_code != 200 or token_status_response.text == "":
            # Check the most usual case for a non-HTTP 200 return: HTTP 401 with sub-errorCode (its embedded in the response text) 11 - the authorization token has expired
            if token_status_response.status_code == 401 and eval(
                    token_status_response.text)['errorCode'] == 11:
                # Inform the user first
                auth_token_log.warning(
                    "The authorization token for user type = {0} retrieved from {1}.{2} is expired. Requesting new one..."
                    .format(str(user_type), str(database_name),
                            str(table_name)))
            elif token_status_response.text == "":
                auth_token_log.warning(
                    "The authorization provided was issued from a different ThingsBoard installation than this one! Need to issued a new pair..."
                )

            # Use the refresh token to retrieve a new authorization dictionary into the proper variable. No need to provide the refresh token: the tb_auth_controller.refresh_session_token method already takes care of retrieving it from the
            # database. Create a dictionary to call this method by setting all user_types to False except the one that I want
            # (the user_type key overwrites one of the three False entries, since user_type is guaranteed to be one of them at this point)
            new_auth_dict = {
                'sys_admin': False,
                'tenant_admin': False,
                'customer_user': False,
                user_type: True
            }

            # If I caught that annoying case in which a valid authorization token from a different ThingsBoard installation
            if token_status_response.text == "":
                new_auth_dict = tb_auth_controller.get_session_tokens(
                    sys_admin=new_auth_dict['sys_admin'],
                    tenant_admin=new_auth_dict['tenant_admin'],
                    customer_user=new_auth_dict['customer_user'])
                auth_token_log.info(
                    "Got a new pair of authorization tokens for the {0} ThingsBoard installation."
                    .format(str(user_config.access_info['host'])))
            # Otherwise, its an expired token case. Deal with it properly then
            # NOTE: The call to the refresh_session_token method already verifies and deals with expired refreshTokens too.
            else:
                new_auth_dict = tb_auth_controller.refresh_session_token(
                    sys_admin=new_auth_dict['sys_admin'],
                    tenant_admin=new_auth_dict['tenant_admin'],
                    customer_user=new_auth_dict['customer_user'])
                auth_token_log.info(
                    "Refreshed the authorization tokens for the {0} ThingsBoard installation."
                    .format(str(user_config.access_info['host'])))

            # From this point on, the process is the same for both cases considered above
            # NOTE(review): new_auth_dict[user_type] is assumed to be a dictionary carrying 'token' and 'refreshToken'
            # keys — confirm against the tb_auth_controller return format

            # If I got to this point, then my new_auth_dict has a fresh pair of authorization and refresh tokens under the user_type key entry (the previous call raises an Exception otherwise)
            # In this case, I have the tokens in the database expired. Update these entries before returning the valid authorization token
            sql_update = mysql_utils.create_update_sql_statement(
                column_list=column_list,
                table_name=table_name,
                trigger_column_list=['user_type'])

            # Prepare the data tuple for the UPDATE operation respecting the expected order: user_type, token, token_timestamp, refreshToken, refreshToken_timestamp and user_type again (because of the WHERE clause in the UPDATE)
            update_data_tuple = (user_type, new_auth_dict[user_type]['token'],
                                 datetime.datetime.now().replace(
                                     microsecond=0),
                                 new_auth_dict[user_type]['refreshToken'],
                                 datetime.datetime.now().replace(
                                     microsecond=0), user_type)

            # Execute the statement
            change_cursor = mysql_utils.run_sql_statement(
                change_cursor, sql_update, update_data_tuple)

            # And check the execution results
            if not change_cursor.rowcount:
                error_msg = "Could not update {0}.{1} with '{2}' statement...".format(
                    str(database_name), str(table_name),
                    str(change_cursor.statement))
                auth_token_log.error(error_msg)
                change_cursor.close()
                select_cursor.close()
                cnx.close()
                raise mysql_utils.MySQLDatabaseException(message=error_msg)
            else:
                auth_token_log.info(
                    "Token database information for user_type = '{0}' updated successfully in {1}.{2}!"
                    .format(str(user_type), str(database_name),
                            str(table_name)))
                cnx.commit()
                # Close the database access objects and return the valid token then
                change_cursor.close()
                select_cursor.close()
                cnx.close()
                return new_auth_dict[user_type]['token']
        # Check if the response returned has the user type (which would be under the 'authority' key in the response dictionary), matches the user_type provided (it would be quite weird if doesn't, but check it anyways)
        elif token_status_dict is not None and token_status_dict[
                'authority'].lower() != user_type:
            auth_token_log.warning(
                "Attention: the authorization token retrieved from {0}.{1} for user type '{2}' provided is actually associated with a '{3}' user type! Resetting..."
                .format(str(database_name), str(table_name), str(user_type),
                        str(token_status_dict['authority'])))
            # Mismatch detected. Assuming that the ThingsBoard API only accepts user types from the set defined and since I've validated the user type provided as argument also, this means that my mismatch is at the MySQL database level,
            # that somehow has a valid authentication token submitted under a valid user type, just not the correct one
            # First, update the user_type in the database for the correct one (the one retrieved from the remote API)
            remote_user_type = token_status_dict['authority']
            # Request an UPDATE SQL template to replace the current user type by the correct remote_user_type
            sql_update = mysql_utils.create_update_sql_statement(
                column_list=['user_type'],
                table_name=table_name,
                trigger_column_list=['user_type'])
            data_tuple = (remote_user_type, user_type)

            # Execute the statement
            change_cursor = mysql_utils.run_sql_statement(
                change_cursor, sql_update, data_tuple)

            # Check if something was done
            if not change_cursor.rowcount:
                error_msg = "Update operation '{0}' in {1}.{2} not successful!".format(
                    str(change_cursor.statement), str(database_name),
                    str(table_name))
                auth_token_log.error(error_msg)
                change_cursor.close()
                select_cursor.close()
                cnx.close()
                raise mysql_utils.MySQLDatabaseException(message=error_msg)
            else:
                # Commit the changes, warn the user, request a new authentication token for the original user_type requested, save it in the database (in a new entry given that the last one was changed) and return the valid authorization token
                # back, which should always be what this method does before exiting (either this or raise an Exception)
                cnx.commit()

                auth_token_log.warning(
                    "Successfully updated user_type = {0} entry to {1} in {2}.{3}. Requesting new authorization token to {0}..."
                    .format(str(user_type), str(remote_user_type),
                            str(database_name), str(table_name)))

                # Set out the flags for the new session token request, setting all user_types to False at first but then switching on to True only the one matching the provided user_type
                new_auth_dict = {
                    'sys_admin': False,
                    'tenant_admin': False,
                    'customer_user': False,
                    user_type: True
                }

                # And now I can request a new session token for only the user_type that I need without having to explicit a different call signature for each possible case. Clever!
                new_auth_dict = tb_auth_controller.get_session_tokens(
                    sys_admin=new_auth_dict['sys_admin'],
                    tenant_admin=new_auth_dict['tenant_admin'],
                    customer_user=new_auth_dict['customer_user'])

                # If I got here, it means that I have a new authorization dictionary with all entries set to None except the one corresponding to the requested user_type. Update the database and return the token back to the user. Since the
                # new_auth_dict is properly filled, I can now ignore the rest of this if-else jungle. The fact that new_auth_dict is not None anymore is going to trigger an INSERT operation with its data into the database
                # (deliberate fall-through: execution continues at the INSERT block at the bottom of this method)
                pass

        # The HTTP status code is a nice 200 OK. Nothing to do but to return the valid token
        else:
            auth_token_log.info(
                "Got a still valid authorization token for user type {0} from {1}.{2}."
                .format(str(user_type), str(database_name), str(table_name)))
            # Close the database structures before returning the token
            select_cursor.close()
            change_cursor.close()
            cnx.close()
            return auth_token

    else:
        # If I get to this point it means that no valid authorization token was found so far in the database. Yet, there is a possibility that some other token request may have be been placed in the logic above and now it needs the data retrieved to
        # be sent to the database. I can detect this by looking at the new_auth_dict variable. If its None, it means that I need to request a new pair of tokens for this user_type.
        # Create a base for the new authorization dictionary by setting all user_types to False initially and then triggering just the one that needs new authorization tokens to True
        new_auth_dict = {
            'sys_admin': False,
            'tenant_admin': False,
            'customer_user': False,
            user_type: True
        }

        # And use this to request a new pair of authorization tokens from the remote API
        new_auth_dict = tb_auth_controller.get_session_tokens(
            sys_admin=new_auth_dict['sys_admin'],
            tenant_admin=new_auth_dict['tenant_admin'],
            customer_user=new_auth_dict['customer_user'])

    # In any case, I should have a new_auth_dict dictionary here with one entry filled in with a valid authorization token. Time to add it to the database
    sql_insert = mysql_utils.create_insert_sql_statement(
        column_list=column_list, table_name=table_name)

    # And create the data tuple by replacing the members in the column_list retrieved before by the corresponding values
    # NOTE: column_list is reused in place as the INSERT data container — each column NAME is overwritten by its VALUE,
    # relying on the list order matching the placeholders produced by create_insert_sql_statement above
    column_list[column_list.index('user_type')] = user_type
    column_list[column_list.index('token')] = new_auth_dict[user_type]['token']
    column_list[column_list.index(
        'token_timestamp')] = datetime.datetime.now().replace(microsecond=0)
    column_list[column_list.index(
        'refreshToken')] = new_auth_dict[user_type]['refreshToken']
    column_list[column_list.index(
        'refreshToken_timestamp')] = datetime.datetime.now().replace(
            microsecond=0)

    # Execute the statement
    change_cursor = mysql_utils.run_sql_statement(change_cursor, sql_insert,
                                                  tuple(column_list))

    if not change_cursor.rowcount:
        error_msg = "Failed to execute '{0}' in {1}.{2}. Exiting...".format(
            str(change_cursor.statement), str(database_name), str(table_name))
        auth_token_log.error(error_msg)
        change_cursor.close()
        select_cursor.close()
        cnx.close()
        raise mysql_utils.MySQLDatabaseException(message=error_msg)
    else:
        cnx.commit()
        auth_token_log.info(
            "Added authorization token from user_type = {0} to {1}.{2} successfully!"
            .format(str(user_type), str(database_name), str(table_name)))
        # Return the token then
        select_cursor.close()
        change_cursor.close()
        cnx.close()
        return new_auth_dict[user_type]['token']
Example #11
0
def getTenantDevices(type=None, textSearch=None, sortProperty=None, sortOrder=None, pageSize=10, page=10):
    """GET method to retrieve the list of devices, along with their Tenant and Customer associations, currently configured in the ThingsBoard platform. The indexer of the returned list is the DEVICE (or its id, to be more precise).
    @:type user_types allowed for this service: CUSTOMER_USER
    @:param type (str) - Use this field to narrow down results to devices of this custom device type (the one defined by the user upon the device's creation, e.g., 'Thermometer', 'Water meter' and so on). The match is case-sensitive: if a device
    type is defined as 'Thermometer', providing type = 'thermometer' doesn't return any results just because of the uppercase difference. The caller must know precisely which device types were defined in the system so far.
    @:param textSearch (str) - Use this field to narrow down results based only on the 'name' field. Like the previous parameter, the string in this field has to be exactly identical (upper/lower case respected) to what is in a record's 'name'
    field to return any results.
    @:param sortProperty (str) - Name of the device property used to sort the returned result set.
    @:param sortOrder (str) - Direction of the sorting applied to the returned result set (e.g., 'ASC' or 'DESC').
    @:param pageSize (int) - Maximum number of records to return per page. If the result set was truncated by this value, the result dictionary is returned with the 'nextPageLink' key set to another dictionary describing the remainder and the
    'hasNext' key set to True. Otherwise, 'nextPageLink' is set to NULL and 'hasNext' comes back set to False.
    @:param page (int) - Zero-based index of the result page to retrieve.
    @:raise utils.InputValidationException - For errors during the validation of inputs
    @:raise utils.ServiceEndpointException - For errors during the API operation
    @:raise Exception - For any other types of errors
    @:return A HTTP response object containing the following result dictionary (if the API call was successful):
    {
        "data": [
            {
                device_1_data
            },
            ...
            {
                device_n_data
            }],
        "nextPageLink": null,
        "hasNext": false
    }
    Each element of the 'data' key associated list is the description of a single device in the database using the following format:
    {
      "id": {
        "entityType": str,
        "id": str
      },
      "createdTime": int,
      "additionalInfo": str,
      "tenantId": {
        "entityType": str,
        "id": str
      },
      "customerId": {
        "entityType": str,
        "id": str
      },
      "name": str,
      "type": str,
      "label": str
    }
    The way that ThingsBoard manages these devices internally guarantees that a single device can only be associated to a single tenant and a single customer, which simplifies quite a lot the logic needed to process this data later on.
    """
    tenant_device_log = ambi_logger.get_logger(__name__)

    # Validate inputs
    # Start by the mandatory ones (the pagination pair)
    utils.validate_input_type(pageSize, int)
    utils.validate_input_type(page, int)

    # And then go for the optional ones
    if type:
        utils.validate_input_type(type, str)
    if textSearch:
        utils.validate_input_type(textSearch, str)
    if sortProperty:
        utils.validate_input_type(sortProperty, str)
    if sortOrder:
        utils.validate_input_type(sortOrder, str)

    # The remote API rejects non-positive page sizes and negative page indexes
    if pageSize <= 0 or page < 0:
        error_msg = "Invalid pagination values provided: pageSize = {0}, page = {1}. Please provide a greater than zero pageSize and a non-negative page value!".format(str(pageSize), str(page))
        tenant_device_log.error(error_msg)
        raise utils.InputValidationException(message=error_msg)

    # Start with the base endpoint
    service_endpoint = "/api/tenant/devices?"

    url_strings = []

    # Build the query string elements, escaping each value to URL-compatible characters. The forward slashes have to be replaced explicitly by '%2F' given that the quote method doesn't do that by default
    if type:
        url_strings.append("type=" + urllib.parse.quote(type.encode('UTF-8')).replace('/', '%2F'))

    if textSearch:
        url_strings.append("textSearch=" + urllib.parse.quote(textSearch.encode('UTF-8')).replace('/', '%2F'))

    if sortProperty:
        url_strings.append("sortProperty=" + urllib.parse.quote(sortProperty.encode('UTF-8')).replace('/', '%2F'))

    if sortOrder:
        url_strings.append("sortOrder=" + urllib.parse.quote(sortOrder.encode('UTF-8')).replace('/', '%2F'))

    url_strings.append("pageSize=" + str(pageSize))
    url_strings.append("page=" + str(page))

    # Concatenate all the url_strings elements into a single string, each individual element separated by '&' as expected by the remote API, and appended to the base service endpoint
    service_endpoint += '&'.join(url_strings)

    # Get the standard service dictionary from the utils method
    service_dict = utils.build_service_calling_info(mac.get_auth_token('tenant_admin'), service_endpoint)

    # Try to get a response from the remote API
    try:
        response = requests.get(url=service_dict['url'], headers=service_dict['headers'])
    except (requests.exceptions.ConnectionError, requests.exceptions.ConnectTimeout) as ce:
        error_msg = "Could not get a response from {0}...".format(str(service_dict['url']))
        tenant_device_log.error(error_msg)
        raise ce

    # If I got a response, check first if it was the expected HTTP 200 OK
    if response.status_code != 200:
        error_msg = "Request unsuccessful: Received an HTTP " + str(eval(response.text)['status']) + " with message: " + str(eval(response.text)['message'])
        tenant_device_log.error(error_msg)
        raise utils.ServiceEndpointException(message=error_msg)
    else:
        # Before sending the result back, check the status of the 'hasNext' key in the result dictionary and inform the user that, if it is True, there are results still left to return in the remote API server.
        # NOTE: the previous version referenced an undefined 'limit' variable here, raising a NameError whenever the result set was truncated
        if eval(utils.translate_postgres_to_python(response.text))['hasNext']:
            tenant_device_log.warning("Only {0} results returned. There are still more results to return from the remote API side. Increase the 'pageSize' argument to obtain them.".format(str(pageSize)))

        return response
Example #12
0
def getCustomerDevices(customer_name, type=None, textSearch=None, idOffset=None, textOffset=None, limit=50):
    """Method that executes a GET request to the device-controller.getCustomerDevice service of the remote API in order to obtain a list of devices associated with the customer identified by 'customer_name'. This method is but a subset of the
    getTenantDevices method from this own module in the sense that, by specifying a customer during the method call, the list of devices returned is limited to just the devices assigned to this customer, while getTenantDevices returns the list
    of all devices assigned to a tenant, regardless of whom that tenant may be.
    @:type user_types allowed for this service: TENANT_ADMIN, CUSTOMER_USER
    @:param customer_name (str) - The name of the customer as it was defined in its registration in the ThingsBoard interface. This parameter is used to perform SELECT operations in the MySQL database using 'LIKE' clauses so, unlike some of the
    fields in the API service requests, there's some flexibility here for using names that are not exactly identical to what is in the database. The search is applied to the 'name' column of the thingsboard_customers_table; retrieved customer
    records are then used to build the service call to the remote API.
    @:param type (str) - Use this field to narrow down results based on the type of device to return. The type field is set during the device registration in the ThingsBoard platform (e.g., 'Thermometer', 'luximeter', etc.). The search is
    case-sensitive, i.e., only complete type matches return results.
    @:param textSearch (str) - Use this field to narrow down the number of returned results based on the 'name' field. Like the previous field, this one is also case-sensitive (only identical matches return results).
    @:param idOffset (str) - Another search field, based on the 'id' parameter this time. It provides a bit more flexibility than the previous search fields, in the sense that it accepts and processes incomplete id strings, as long as some (but
    not all) of the 12 character segment of its last block are omitted.
    @:param textOffset (str) - Still no clue on what this might be used for...
    @:param limit (int) - Use this field to truncate the number of returned results. If the result set returned from the remote API was truncated, the result dictionary is returned with another dictionary under the 'nextPageLink' key detailing
    the results still to be returned and the 'hasNext' key set to True. Otherwise 'nextPageLink' is set to NULL and 'hasNext' to False.
    @:raise utils.InputValidationException - For errors during the validation of inputs
    @:raise utils.ServiceEndpointException - For errors during the remote API access
    @:raise mysql_utils.MySQLDatabaseException - If no unique customer record can be resolved from customer_name
    @:raise Exception - For any other errors
    @:return A HTTP response object containing the following result dictionary:
    {
        "data": [
            {
                customer_device_1_data
            },
            ...
            {
                customer_device_n_data
            }
        ],
        "nextPageLink": null,
        "hasNext": false
    }

    Each customer_device_data element is a dictionary in the following format:
    customer_device_n_data = {
        "id": {
            "entityType": str,
            "id": str
        },
        "createdTime": int,
        "additionalInfo": null or {
            "description": str
        },
        "tenantId": {
            "entityType": str,
            "id": str
        },
        "customerId": {
            "entityType": str,
            "id": str
        },
        "name": str,
        "type": str,
        "label": str
    }
    """

    customer_device_log = ambi_logger.get_logger(__name__)
    module_table_key = 'customers'
    columns_to_retrieve = ['id']

    # Validate inputs
    try:
        # Start by the mandatory ones first
        utils.validate_input_type(customer_name, str)
        utils.validate_input_type(limit, int)
        if type:
            utils.validate_input_type(type, str)
        if textSearch:
            utils.validate_input_type(textSearch, str)
        if idOffset:
            utils.validate_input_type(idOffset, str)
        if textOffset:
            utils.validate_input_type(textOffset, str)
    except utils.InputValidationException as ive:
        customer_device_log.error(ive.message)
        raise ive

    # Check the number passed in the limit argument for consistency
    if limit <= 0:
        error_msg = "Invalid limit provided: {0}. Please provide a greater than zero limit value!".format(str(limit))
        customer_device_log.error(error_msg)
        raise utils.InputValidationException(message=error_msg)

    # Before going any further, there is a limiting factor here: the customer id. I need to use the customer_name parameter to fetch it through a database consultation. The customer search is going to be an exhaustive one: first try to search
    # for the customer_name as it was passed. If a single result is returned - the desired outcome - cool, move on. If not, try to add a wildcard character at the end of customer_name (customer_name%), then at just the beginning (%customer_name)
    # and, if I still can't find a single result, try one last time with wildcards on both ends of the string (%customer_name%) in order to get an unique record (multiple records returned are also disregarded). If no clear answer is obtained thus
    # far, raise an Exception with this information.
    # Connect to the MySQL database
    cnx = mysql_utils.connect_db(user_config.mysql_db_access['database'])

    # And get a buffered cursor to run SQL statements
    select_cursor = cnx.cursor(buffered=True)

    # Build the SQL SELECT statement to execute in the MySQL database context (the search value is passed as a parameter, never concatenated into the statement)
    sql_select = """SELECT """ + ", ".join(columns_to_retrieve) + """ FROM """ + str(proj_config.mysql_db_tables[module_table_key]) + """ WHERE name LIKE %s;"""

    # Run the statement and check what comes back
    select_cursor = mysql_utils.run_sql_statement(select_cursor, sql_select, (str(customer_name),))

    # If I got a single result from the last SQL execution, I don't need to retrieve the record itself to check it: the cursor retains the number of records found in the statement that was just executed in its rowcount internal variable (which is
    # effectively the same as running a SELECT COUNT(*) instead)
    if select_cursor.rowcount != 1:
        # If the last statement failed, try again with a wildcard character at the end of the customer_name
        customer_device_log.warning("Unable to get an unique result searching for a customer_name = {0} (got {1} results instead). Trying again using customer_name = {2}..."
                                    .format(str(customer_name), str(select_cursor.rowcount), str(customer_name + "%")))
        select_cursor = mysql_utils.run_sql_statement(select_cursor, sql_select, (str(customer_name + "%"),))

        if select_cursor.rowcount != 1:
            customer_device_log.warning("Unable to get an unique result searching for a customer_name = {0} (got {1} result instead). Trying again using customer_name = {2}..."
                                        .format(str(customer_name + "%"), str(select_cursor.rowcount), str("%" + customer_name)))
            select_cursor = mysql_utils.run_sql_statement(select_cursor, sql_select, (str("%" + customer_name),))

            if select_cursor.rowcount != 1:
                customer_device_log.warning("Unable to get an unique result searching for a customer_name = {0} (got {1} result instead). Trying again using customer_name = {2}..."
                                            .format(str("%" + customer_name), str(select_cursor.rowcount), str("%" + customer_name + "%")))
                select_cursor = mysql_utils.run_sql_statement(select_cursor, sql_select, (str("%" + customer_name + "%"),))

                if select_cursor.rowcount != 1:
                    error_msg = "The method was unable to retrieve an unique record for customer_name = {0} (got {1} results instead). Nowhere to go but out now..."\
                        .format(str("%" + customer_name + "%"), str(select_cursor.rowcount))
                    customer_device_log.error(error_msg)
                    # Release the database resources and raise (the previous version called exit(-1) here, which killed the whole process and leaked the connection)
                    select_cursor.close()
                    cnx.close()
                    raise mysql_utils.MySQLDatabaseException(message=error_msg)

    # If my select_cursor was able to go through the last flurry of validations, retrieve the result obtained
    result = select_cursor.fetchone()

    # The SQL SELECT result returns records as n-element tuples, n being the number of columns returned. The SQL statement in this method queries for a single column: 'id', so any result returned should be a single element tuple
    customer_id = str(result[0])

    # Done with the database - release its resources before moving on to the remote API call
    select_cursor.close()
    cnx.close()

    # I now have everything that I need to place a call to the remote API service. Build the service endpoint
    service_endpoint = "/api/customer/{0}/devices?".format(customer_id)

    url_strings = []
    if type:
        # Don't forget to escape the url strings characters to URL-compatible characters, including the '/' character for '%2F'
        url_strings.append("type=" + urllib.parse.quote(type.encode('UTF-8')).replace('/', '%2F'))
    if textSearch:
        url_strings.append("textSearch=" + urllib.parse.quote(textSearch.encode('UTF-8')).replace('/', '%2F'))
    if idOffset:
        url_strings.append("idOffset=" + urllib.parse.quote(idOffset.encode('UTF-8')).replace('/', '%2F'))
    if textOffset:
        url_strings.append("textOffset=" + urllib.parse.quote(textOffset.encode('UTF-8')).replace('/', '%2F'))
    url_strings.append("limit=" + str(limit))

    # Concatenate all the gathered url_strings together with the rest of the service_endpoint, using '&' as a separator
    service_endpoint += '&'.join(url_strings)

    # Get the request dictionary using a REGULAR type authorization token
    service_dict = utils.build_service_calling_info(mac.get_auth_token(user_type='tenant_admin'), service_endpoint)

    # Query the remote API
    try:
        response = requests.get(url=service_dict['url'], headers=service_dict['headers'])
    except (requests.exceptions.ConnectionError, requests.exceptions.ConnectTimeout) as ce:
        error_msg = "Could not get a response from {0}...".format(str(service_dict['url']))
        customer_device_log.error(error_msg)
        # Raise with the descriptive message (the previous version passed the bare exception object as the message) and keep the original cause chained
        raise utils.ServiceEndpointException(message=error_msg) from ce

    # If I got a response, check first if it was the expected HTTP 200 OK
    if response.status_code != 200:
        error_msg = "Request unsuccessful: Received an HTTP " + str(eval(response.text)['status']) + " with message: " + str(eval(response.text)['message'])
        customer_device_log.error(error_msg)
        raise utils.ServiceEndpointException(message=error_msg)
    else:
        # I got a valid result, it appears. Check if the number of results returned was truncated by the limit parameter. If so, warn the user only (there's no need to raise Exceptions on this matter).
        # Translate the results to Python-speak first before going for the comparison, given that this result set was returned from a PostGres backend
        if eval(utils.translate_postgres_to_python(response.text))['hasNext']:
            customer_device_log.warning("Only {0} results returned. There are still results to return from the remote API side. Increase the 'limit' argument to obtain them.".format(str(limit)))

    # I'm good then. Return the result set back
    return response
Example #13
0
def update_devices_table(customer_name=False):
    """The logic behind this module is quite similar to the one employed in update_tenant_table(): it gets a similar data structure in (with all the same annoying problems), has to do the same kind of processing and so on.
    There are multiple ways to retrieve device dictionary data from the remote API: getTenantDevices, using just tenant data, and getCustomerDevices, using customer data instead. The data returned comes in the same format in both cases, hence why
    just one method is used to process the device data. The difference is that using tenant data only retrieves devices that are associated to a tenant, and using customer data only returns devices associated to a customer. So, ideally, both
    methods should be used and the resulting lists merged before running the table updater. The customer based method requires a customer_id that is resolved from a more memorable customer_name, so the additional result set can only be fetched
    when customer_name is passed on to this method. If this argument is omitted, this method uses just the tenant data; otherwise both data sets are retrieved.
    @:param customer_name (str) - OPTIONAL. Name of the customer whose associated devices should also be retrieved and merged into the device list before updating the database table.
    @:raise utils.InputValidationException - If the inputs fail validation
    @:raise Exception - If other errors occur.
    """
    module_table_key = 'devices'
    # Pagination parameters for the tenant-side call. pageSize is also reused as the 'limit' for the customer-side call so both retrievals are capped consistently
    pageSize = 100
    page = 0
    update_devices_log = ambi_logger.get_logger(__name__)

    # Get the base response using just tenant data
    tenant_response = tb_device_controller.getTenantDevices(pageSize=pageSize, page=page)

    # Translate the stuff that comes from the ThingsBoard API as PostGres-speak to Python-speak before forwarding the data (eval casts the translated response.text into a dictionary)
    tenant_response_dict = eval(utils.translate_postgres_to_python(tenant_response.text))

    # Test if all results came back with the current pageSize setting.
    # NOTE: the previous version referenced an undefined 'limit' variable in this warning, raising a NameError whenever results were truncated
    if tenant_response_dict['hasNext']:
        update_devices_log.warning("Not all results from the remote API were returned on the last call using tenant data (pageSize = {0}). Raise the pageSize parameter to retrieve more of them.".format(str(pageSize)))

    # Extract the device data to a list
    tenant_device_list = tenant_response_dict['data']

    customer_device_list = None
    # Check if it's possible to use the customer data too to retrieve customer associated devices
    if customer_name:
        # Validate it first
        try:
            utils.validate_input_type(customer_name, str)
        except utils.InputValidationException as ive:
            update_devices_log.error(ive.message)
            raise ive

        # Input validated. Proceed to query the API, capping the result set at the same value used for the tenant call
        # (the previous version passed 'limit=limit' with 'limit' undefined, which raised a NameError on every customer-based call)
        customer_response = tb_device_controller.getCustomerDevices(customer_name=customer_name, limit=pageSize)

        # Translate it to Python and cast the response to a dictionary
        customer_response_dict = eval(utils.translate_postgres_to_python(customer_response.text))

        # Test if the customer bound results were truncated
        if customer_response_dict['hasNext']:
            update_devices_log.warning("Not all results from the remote API were returned on the last call using customer data (limit = {0}). Raise the limit parameter to retrieve more of them.".format(str(pageSize)))

        # Extract the records into a list
        customer_device_list = customer_response_dict['data']

    # Both device data retrieval methods use the exact same data structure to format their results, hence a direct membership comparison is enough to collate a list with only one record per device in it (devices can be associated to customers
    # and to tenants simultaneously, so indiscriminately concatenating both lists could produce duplicates)

    # Start by picking the tenant device list as the default
    device_list = tenant_device_list

    # If another list was retrieved from the customer data too, merge in only the customer devices not already present
    if customer_device_list:
        for customer_device in customer_device_list:
            if customer_device not in device_list:
                device_list.append(customer_device)

    # The rest of the code works with either just a tenant based device list or one with also customer based devices
    for device in device_list:
        # The device data has a couple of redundant fields: the result dictionary for each device entry returns two keys, tenantId and customerId, which are sub-dictionaries in the format {'entityType': str, 'id': str}. Only the id field is of
        # interest here (it can be used later to do JOIN statements): the entityType associated values are 'TENANT' and 'CUSTOMER', which are already implicit in the parent key. The database columns tenantId and customerId are VARCHAR typed to
        # store just the id string, so these sub-dictionaries have to be flattened to just the id strings - otherwise the list of values would not match the number of database columns
        device['tenantId'] = device['tenantId']['id']
        device['customerId'] = device['customerId']['id']

        # Subsequent calls for device data from the ThingsBoard remote API require 5 specific and mandatory elements: entityType, entityId, timeseriesKey, startTimestamp and endTimestamp. The first 2 are covered by the
        # device_controller.getTenantDevices method and the last 2 are set by the user, so only the timeseriesKey is missing at this point. To obtain it, the telemetry_controller.getTimeseriesKeys method is called with the device's entityType
        # and entityId just returned from the previous API call. The thingsboard_devices_table already has an extra 'timeseriesKeys' column at the end to store this element, so it's just a matter of putting it into the dictionary to return.
        # The return is a list with as many elements as the number of timeseriesKeys supported by the device (one per supported reading/sensor, since a device can be a multi-sensor array behind a single interface)
        timeseries_keys = tb_telemetry_controller.getTimeseriesKeys(device['id']['entityType'], device['id']['id'])

        # Concatenate all elements in timeseries_keys into a single comma-separated string (no spaces). This matches exactly the 'keys' format expected by the remote telemetry API, e.g.:
        # http://localhost:8080/api/plugins/telemetry/DEVICE/<id>/values/timeseries?limit=3&agg=NONE&keys=humidity,temperature,pressure,lux&startTs=...&endTs=...
        # so a request can later be built straight from the database value, with no post-processing at all.
        # NOTE(review): the iterability guard also accepts a plain string (strings are iterable), in which case join would interleave commas between characters - presumably getTimeseriesKeys always returns a list; confirm at its definition
        device['timeseriesKeys'] = ",".join(timeseries_keys) if hasattr(timeseries_keys, "__iter__") else ""

        # Same old, same old. Expand the device dictionary to a single level one first
        device = utils.extract_all_key_value_pairs_from_dictionary(input_dictionary=device)

        # And replace any annoying POSIX timestamps with datetime.datetime objects
        try:
            device['createdTime'] = mysql_utils.convert_timestamp_tb_to_datetime(timestamp=device['createdTime'])
        except KeyError:
            # Some entries may not carry a createdTime field; leave those untouched
            pass

        database_table_updater.add_table_data(device, proj_config.mysql_db_tables[module_table_key])