Example #1
import copy
import logging

from azure.common import AzureException, AzureMissingResourceHttpError
from azure.storage.table import TableService  # legacy azure-storage table SDK

# KeyWrapper, KeyResolver, StorageTableModel and StorageTableCollection are
# assumed to be defined elsewhere in the project (KeyWrapper/KeyResolver follow
# the client-side encryption samples from the Azure storage SDK docs).

log = logging.getLogger(__name__)


class StorageTableContext():
    """Initializes the repository with the specified settings dict.
        Required settings in config dict are:
        - AZURE_STORAGE_NAME
        - AZURE_STORAGE_KEY
    """
    
    _models = []
    _encryptproperties = False
    _encrypted_properties = []
    _tableservice = None
    _storage_key = ''
    _storage_name = ''

    def __init__(self, **kwargs):

        self._storage_name = kwargs.get('AZURE_STORAGE_NAME', '')
        self._storage_key = kwargs.get('AZURE_STORAGE_KEY', '')

        """ service init """
        self._models = []
        if self._storage_key != '' and self._storage_name != '':
            self._tableservice = TableService(account_name = self._storage_name, account_key = self._storage_key, protocol='https')

        """ encrypt queue service """
        if kwargs.get('AZURE_REQUIRE_ENCRYPTION', False):

            # Create the KEK used for encryption.
            # KeyWrapper is the provided sample implementation, but the user may use their own object as long as it implements the interface above.
            kek = KeyWrapper(kwargs.get('AZURE_KEY_IDENTIFIER', 'otrrentapi'), kwargs.get('SECRET_KEY', 'super-duper-secret')) # Key identifier

            # Create the key resolver used for decryption.
            # KeyResolver is the provided sample implementation, but the user may use whatever implementation they choose so long as the function set on the service object behaves appropriately.
            key_resolver = KeyResolver()
            key_resolver.put_key(kek)

            # Set the require Encryption, KEK and key resolver on the service object.
            self._encryptproperties = True
            self._tableservice.key_encryption_key = kek
            self._tableservice.key_resolver_funcion = key_resolver.resolve_key
            self._tableservice.encryption_resolver_function = self.__encryptionresolver__


        pass

    def __createtable__(self, tablename) -> bool:
        if self._tableservice is not None:
            try:
                self._tableservice.create_table(tablename)
                return True
            except AzureException as e:
                log.error('failed to create {} with error {}'.format(tablename, e))
                return False
        else:
            return True

    # Encryption resolver: decide per property whether it should be encrypted client side.
    def __encryptionresolver__(self, pk, rk, property_name):
        return property_name in self._encrypted_properties

    def register_model(self, storagemodel: object):
        modelname = storagemodel.__class__.__name__
        if isinstance(storagemodel, StorageTableModel):
            if modelname not in self._models:
                self.__createtable__(storagemodel._tablename)
                self._models.append(modelname)

                """ set properties to be encrypted client side """
                if self._encryptproperties:
                    self._encrypted_properties += storagemodel._encryptedproperties

                log.info('model {} registered successfully. Models are {!s}. Encrypted fields are {!s}'.format(
                    modelname, self._models, self._encrypted_properties))

    def table_isempty(self, tablename, PartitionKey='', RowKey='') -> bool:
        if self._tableservice is not None:

            query_filter = "PartitionKey eq '{}'".format(PartitionKey) if PartitionKey != '' else ''
            if query_filter == '':
                query_filter = "RowKey eq '{}'".format(RowKey) if RowKey != '' else ''
            else:
                # note the leading space so the combined filter stays valid OData
                query_filter = query_filter + (" and RowKey eq '{}'".format(RowKey) if RowKey != '' else '')
            try:
                entities = list(self._tableservice.query_entities(tablename, filter=query_filter,
                                                                  select='PartitionKey', num_results=1))
                return len(entities) != 1

            except AzureMissingResourceHttpError as e:
                log.debug('failed to query {} with error {}'.format(tablename, e))
                return True

        else:
            return True

    def exists(self, storagemodel) -> bool:
        exists = False
        if isinstance(storagemodel, StorageTableModel):
            modelname = storagemodel.__class__.__name__
            if modelname in self._models:
                if storagemodel._exists is None:
                    try:
                        self._tableservice.get_entity(storagemodel._tablename,
                                                      storagemodel.PartitionKey,
                                                      storagemodel.RowKey)
                        storagemodel._exists = True
                        exists = True

                    except AzureMissingResourceHttpError:
                        storagemodel._exists = False
                else:
                    exists = storagemodel._exists
            else:
                log.debug('please register model {} first'.format(modelname))

        return exists

    def get(self, storagemodel) -> StorageTableModel:
        """ load entity data from storage to vars in self """

        if isinstance(storagemodel, StorageTableModel):
            modelname = storagemodel.__class__.__name__
            if modelname in self._models:
                try:
                    entity = self._tableservice.get_entity(storagemodel._tablename,
                                                           storagemodel.PartitionKey,
                                                           storagemodel.RowKey)
                    storagemodel._exists = True

                    """ sync with entity values """
                    for key, default in vars(storagemodel).items():
                        if not key.startswith('_') and key not in ['', 'PartitionKey', 'RowKey']:
                            value = getattr(entity, key, None)
                            if value is not None:
                                setattr(storagemodel, key, value)

                except Exception as e:
                    # AzureMissingResourceHttpError and any other failure are handled identically
                    log.debug('can not get table entity: Table {}, PartitionKey {}, RowKey {} because {!s}'.format(
                        storagemodel._tablename, storagemodel.PartitionKey, storagemodel.RowKey, e))
                    storagemodel._exists = False

            else:
                log.debug('please register model {} first to {!s}'.format(modelname, self._models))

            return storagemodel

        else:
            return None

    def insert(self, storagemodel) -> StorageTableModel:
        """ insert model into storage """
        if isinstance(storagemodel, StorageTableModel):
            modelname = storagemodel.__class__.__name__
            if (modelname in self._models):
                try:            
                    self._tableservice.insert_or_replace_entity(storagemodel._tablename, storagemodel.entity())
                    storagemodel._exists = True

                except AzureMissingResourceHttpError as e:
                    log.debug('can not insert or replace table entity:  Table {}, PartitionKey {}, RowKey {} because {!s}'.format(storagemodel._tablename, storagemodel.PartitionKey, storagemodel.RowKey, e))
            else:
                log.debug('please register model {} first'.format(modelname))

            return storagemodel
        else:
            return None

    def merge(self, storagemodel) -> StorageTableModel:
        """ try to merge entry """
        if isinstance(storagemodel, StorageTableModel):
            modelname = storagemodel.__class__.__name__
            if (modelname in self._models):
                try:            
                    self._tableservice.insert_or_merge_entity(storagemodel._tablename, storagemodel.entity())
                    storagemodel._exists = True

                except AzureMissingResourceHttpError as e:
                    log.debug('can not insert or merge table entity:  Table {}, PartitionKey {}, RowKey {} because {!s}'.format(storagemodel._tablename, storagemodel.PartitionKey, storagemodel.RowKey, e))
            else:
                log.debug('please register model {} first'.format(modelname))

            return storagemodel
        else:
            return None
    
    def delete(self, storagemodel):
        """ delete existing Entity """
        if isinstance(storagemodel, StorageTableModel):
            modelname = storagemodel.__class__.__name__
            if (modelname in self._models):
                try:
                    self._tableservice.delete_entity(storagemodel._tablename, storagemodel.PartitionKey, storagemodel.RowKey)
                    storagemodel._exists = False

                except AzureMissingResourceHttpError as e:
                    log.debug('can not delete table entity:  Table {}, PartitionKey {}, RowKey {} because {!s}'.format(storagemodel._tablename, storagemodel.PartitionKey, storagemodel.RowKey, e))

            else:
                log.debug('please register model {} first'.format(modelname))

            return storagemodel
        else:
            return None


    # NOTE: the following three helpers reference _PartitionKey, _RowKey, load(),
    # save() and a no-argument delete() on the instance, so they appear to belong
    # on the model class (StorageTableModel) rather than on this context class.
    def __changeprimarykeys__(self, PartitionKey='', RowKey=''):
        """ Change Entity Primary Keys into new instance:

            - PartitionKey and/or
            - RowKey
        """

        PartitionKey = PartitionKey if PartitionKey != '' else self._PartitionKey
        RowKey = RowKey if RowKey != '' else self._RowKey

        """ change Primary Keys if different to existing ones """
        if (PartitionKey != self._PartitionKey) or (RowKey != self._RowKey):
            return True, PartitionKey, RowKey
        else:
            return False, PartitionKey, RowKey

    def moveto(self, PartitionKey='', RowKey=''):
        """ Change Entity Primary Keys and move in Storage:

            - PartitionKey and/or
            - RowKey
        """
        changed, PartitionKey, RowKey = self.__changeprimarykeys__(PartitionKey, RowKey)

        if changed:

            """ sync self """
            new = self.copyto(PartitionKey, RowKey)
            new.save()

            """ delete Entity if exists in Storage """
            self.delete()

    def copyto(self, PartitionKey='', RowKey='') -> object:
        """ Change Entity Primary Keys and copy to new Instance:

            - PartitionKey and/or
            - RowKey
        """
        changed, PartitionKey, RowKey = self.__changeprimarykeys__(PartitionKey, RowKey)

        self.load()
        # copy.copy() so the returned instance does not alias (and mutate) self
        new = copy.copy(self)
        new._PartitionKey = PartitionKey
        new._RowKey = RowKey
        new.load()

        return new

    def query(self, storagecollection) -> StorageTableCollection:
        if isinstance(storagecollection, StorageTableCollection):
            try:
                storagecollection.extend(self._tableservice.query_entities(
                    storagecollection._tablename, storagecollection._filter))

            except AzureMissingResourceHttpError as e:
                log.debug('can not query table {} with filters {} because {!s}'.format(
                    storagecollection._tablename, storagecollection._filter, e))

            return storagecollection
        else:
            return None
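
A minimal usage sketch for the context above, assuming a hypothetical Device model; StorageTableModel (not shown here) is assumed to provide entity(), _exists and the PartitionKey/RowKey attributes the context relies on:

class Device(StorageTableModel):
    _tablename = 'devices'
    _encryptedproperties = []

context = StorageTableContext(AZURE_STORAGE_NAME='<account-name>',
                              AZURE_STORAGE_KEY='<account-key>')
context.register_model(Device())

device = Device()
device.PartitionKey = 'engine'
device.RowKey = '1'
if not context.exists(device):
    context.insert(device)
device = context.get(device)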
Example #2

import os

import numpy as np
from azure.storage.table import TableService  # legacy azure-storage table SDK

# Read the storage credentials from the environment instead of hard-coding the account key.
ACCOUNT_NAME = os.environ.get('StorageAccountName', 'snsr')
ACCOUNT_KEY = os.environ['StorageAccountKey']
CONTAINER_NAME = os.environ.get('TelemetryContainerName', 'simulation')

table_service = TableService(account_name=ACCOUNT_NAME,
                             account_key=ACCOUNT_KEY)
table_service.create_table('devices')

device = {'PartitionKey': 'engine', 'RowKey': '1'}

table_service.insert_or_merge_entity('devices', device)
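
# A quick sanity-check sketch (assumption: the 'devices' table and the entity
# written above exist); read the entity back with the same TableService client.
device_entity = table_service.get_entity('devices', 'engine', '1')
print(device_entity.PartitionKey, device_entity.RowKey)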

wk = np.array([1, 2, 3, 5, 12, 15])
Ak = [5, 8, 2 / 3, 8, 13, 5]

length = 1
fs = 8000


def generate(t, wk, Ak, s=None, last_speed=0, target_speed=0):
    N = length * fs
    ts = np.linspace(t, t + length, num=N)

    x = np.array([0, length]) + t
    points = np.array([last_speed, target_speed])
Example #3
import glob
import os

import aztk.models
import aztk.spark
import azure.batch.models as batch_models
from azure.storage.table import TableService  # legacy azure-storage table SDK

# AztkError, BatchErrorException, UserConfiguration and the application-defined
# ClusterStatus enum are assumed to be imported or defined elsewhere in this module.


class AztkCluster:
    def __init__(self,
                 vm_count=0,
                 sku_type='standard_d2_v2',
                 username='******',
                 password='******'):
        self.vm_count = int(vm_count)
        self.sku_type = sku_type
        self.username = username
        self.password = password
        self.BATCH_ACCOUNT_NAME = os.environ['BATCH_ACCOUNT_NAME']
        BATCH_ACCOUNT_KEY = os.environ['BATCH_ACCOUNT_KEY']
        BATCH_SERVICE_URL = os.environ['BATCH_ACCOUNT_URL']
        STORAGE_ACCOUNT_SUFFIX = 'core.windows.net'
        self.STORAGE_ACCOUNT_NAME = os.environ['STORAGE_ACCOUNT_NAME']
        self.STORAGE_ACCOUNT_KEY = os.environ['STORAGE_ACCOUNT_KEY']

        self.secrets_config = aztk.spark.models.SecretsConfiguration(
            shared_key=aztk.models.SharedKeyConfiguration(
                batch_account_name=self.BATCH_ACCOUNT_NAME,
                batch_account_key=BATCH_ACCOUNT_KEY,
                batch_service_url=BATCH_SERVICE_URL,
                storage_account_name=self.STORAGE_ACCOUNT_NAME,
                storage_account_key=self.STORAGE_ACCOUNT_KEY,
                storage_account_suffix=STORAGE_ACCOUNT_SUFFIX),
            ssh_pub_key="")
        self.table_service = TableService(
            account_name=self.STORAGE_ACCOUNT_NAME,
            account_key=self.STORAGE_ACCOUNT_KEY)

    def createCluster(self):

        # create a client
        client = aztk.spark.Client(self.secrets_config)

        # list available clusters
        clusters = client.list_clusters()

        SPARK_CONFIG_PATH = os.path.normpath(
            os.path.join(os.path.dirname(__file__), 'spark', 'spark',
                         '.config'))
        SPARK_JARS_PATH = os.path.normpath(
            os.path.join(os.path.dirname(__file__), 'spark', 'spark', 'jars'))

        SPARK_CORE_SITE = os.path.join(SPARK_CONFIG_PATH, 'core-site.xml')

        jars = glob.glob(os.path.join(SPARK_JARS_PATH, '*.jar'))

        # define spark configuration
        spark_conf = aztk.spark.models.SparkConfiguration(
            spark_defaults_conf=os.path.join(SPARK_CONFIG_PATH,
                                             'spark-defaults.conf'),
            spark_env_sh=os.path.join(SPARK_CONFIG_PATH, 'spark-env.sh'),
            core_site_xml=SPARK_CORE_SITE,
            jars=jars)

        clusterDetails = self.table_service.get_entity(
            'cluster', 'predictivemaintenance', 'predictivemaintenance')
        cluster_number = int(clusterDetails.ClusterNumber) + 1
        cluster_id = clusterDetails.PartitionKey + str(cluster_number)

        jupyterCustomScript = aztk.models.CustomScript(
            "jupyter",
            "D:/home/site/wwwroot/flask/spark/customScripts/jupyter.sh",
            "all-nodes")
        azuremlProjectFileShare = aztk.models.FileShare(
            self.STORAGE_ACCOUNT_NAME, self.STORAGE_ACCOUNT_KEY,
            'azureml-project', '/mnt/azureml-project')
        azuremlFileShare = aztk.models.FileShare(self.STORAGE_ACCOUNT_NAME,
                                                 self.STORAGE_ACCOUNT_KEY,
                                                 'azureml-share',
                                                 '/mnt/azureml-share')
        # configure my cluster
        cluster_config = aztk.spark.models.ClusterConfiguration(
            docker_repo='aztk/python:spark2.2.0-python3.6.2-base',
            cluster_id=cluster_id,  # Warning: this name must be a valid Azure Blob Storage container name
            vm_count=self.vm_count,
            # vm_low_pri_count=2, #this and vm_count are mutually exclusive
            vm_size=self.sku_type,
            custom_scripts=[jupyterCustomScript],
            spark_configuration=spark_conf,
            file_shares=[azuremlProjectFileShare, azuremlFileShare],
            user_configuration=UserConfiguration(
                username=self.username,
                password=self.password,
            ))
        try:
            cluster = client.create_cluster(cluster_config)
        except Exception as e:
            clusterDetails = {
                'PartitionKey': 'predictivemaintenance',
                'RowKey': 'predictivemaintenance',
                'Status': ClusterStatus.Failed,
                'UserName': self.username,
                'ClusterNumber': cluster_number,
                'Message': str(e)
            }
            self.table_service.insert_or_merge_entity('cluster',
                                                      clusterDetails)
            return

        clusterDetails = {
            'PartitionKey': 'predictivemaintenance',
            'RowKey': 'predictivemaintenance',
            'Status': ClusterStatus.Provisioning,
            'UserName': self.username,
            'ClusterNumber': cluster_number
        }
        self.table_service.insert_or_merge_entity('cluster', clusterDetails)

    def getCluster(self):
        # create a client
        client = aztk.spark.Client(self.secrets_config)
        try:
            clusterDetails = self.table_service.get_entity(
                'cluster', 'predictivemaintenance', 'predictivemaintenance')
        except Exception as e:
            clusterDetails = {
                'PartitionKey': 'predictivemaintenance',
                'RowKey': 'predictivemaintenance',
                'Status': ClusterStatus.NotCreated,
                'ClusterNumber': '0'
            }
            self.table_service.insert_or_merge_entity('cluster',
                                                      clusterDetails)
            return clusterDetails

        cluster_id = clusterDetails.PartitionKey + str(
            clusterDetails.ClusterNumber)
        if clusterDetails.Status == ClusterStatus.Deleted or clusterDetails.Status == ClusterStatus.NotCreated:
            return clusterDetails
        try:
            cluster = client.get_cluster(cluster_id=cluster_id)
            for node in cluster.nodes:
                remote_login_settings = client.get_remote_login_settings(
                    cluster.id, node.id)
                if node.state in [
                        batch_models.ComputeNodeState.unknown,
                        batch_models.ComputeNodeState.unusable,
                        batch_models.ComputeNodeState.start_task_failed
                ]:
                    errorMsg = "An error occured while starting the Nodes in the batch account " + self.BATCH_ACCOUNT_NAME + ". Details: "
                    if node.start_task_info.failure_info != None:
                        errorMsg += node.start_task_info.failure_info.message
                    clusterDetails = {
                        'PartitionKey': 'predictivemaintenance',
                        'RowKey': 'predictivemaintenance',
                        'Status': ClusterStatus.Failed,
                        'UserName': self.username,
                        'ClusterNumber': clusterDetails.ClusterNumber,
                        'Message': errorMsg
                    }
                    self.table_service.insert_or_merge_entity(
                        'cluster', clusterDetails)
                    return clusterDetails

                if node.id == cluster.master_node_id:
                    master_ipaddress = remote_login_settings.ip_address
                    master_Port = remote_login_settings.port
                    clusterDetails = {
                        'PartitionKey': 'predictivemaintenance',
                        'RowKey': 'predictivemaintenance',
                        'Status': ClusterStatus.Provisioned,
                        'Master_Ip_Address': master_ipaddress,
                        'Master_Port': master_Port,
                        'UserName': clusterDetails.UserName,
                        'ClusterNumber': clusterDetails.ClusterNumber
                    }
                    self.table_service.insert_or_merge_entity(
                        'cluster', clusterDetails)
        except (AztkError, BatchErrorException):
            clusterDetails = self.table_service.get_entity(
                'cluster', 'predictivemaintenance', 'predictivemaintenance')

        return clusterDetails

    def deleteCluster(self):

        # create a client
        client = aztk.spark.Client(self.secrets_config)
        clusterDetails = self.table_service.get_entity(
            'cluster', 'predictivemaintenance', 'predictivemaintenance')
        cluster_id = clusterDetails.PartitionKey + str(
            clusterDetails.ClusterNumber)
        try:
            client.delete_cluster(cluster_id=cluster_id)
        except Exception as e:
            clusterDetails = {
                'PartitionKey': 'predictivemaintenance',
                'RowKey': 'predictivemaintenance',
                'Status': ClusterStatus.DeletionFailed,
                'UserName': self.username,
                'ClusterNumber': clusterDetails.ClusterNumber,
                'Message': str(e)
            }
            self.table_service.insert_or_merge_entity('cluster',
                                                      clusterDetails)
            return

        clusterDetails = {
            'PartitionKey': 'predictivemaintenance',
            'RowKey': 'predictivemaintenance',
            'Status': ClusterStatus.Deleted,
            'ClusterNumber': clusterDetails.ClusterNumber
        }
        self.table_service.insert_or_merge_entity('cluster', clusterDetails)
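
A brief usage sketch for AztkCluster, assuming the BATCH_* and STORAGE_* environment variables are set and the 'cluster' table has been seeded as in Example #5 below; the user credentials are placeholders:

cluster = AztkCluster(vm_count=2, sku_type='standard_d2_v2',
                      username='sparkuser', password='<password>')
cluster.createCluster()

details = cluster.getCluster()
status = details['Status'] if isinstance(details, dict) else details.Status
print(status)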
Example #4
from azure.storage.table import TableService  # legacy azure-storage table SDK


class TableStorage():
    def __init__(self, CONNECTION_STRING):
        """
        Constructor. Expects the Azure Storage account connection string,
        obtained from the Storage resource -> Access Keys.

        Parameters:
            CONNECTION_STRING   = String that includes the AccountName,
                                  AccountKey and EndpointSuffix
        """
        self.CONNECTION_STRING = CONNECTION_STRING

        # Split the connection string into its parts
        Config = dict(
            s.split('=', 1) for s in CONNECTION_STRING.split(';') if s)

        # Extract the storage account name and the EndpointSuffix
        self.AccountName = Config.get('AccountName')
        self.EndPointSuffix = Config.get('EndpointSuffix')

    def CreateTableServices(self):
        """
        Initializes a Table Service instance used to communicate with the
        storage account in Azure.
        """
        self.TableService = TableService(
            account_name=self.AccountName,
            connection_string=self.CONNECTION_STRING,
            endpoint_suffix=self.EndPointSuffix)

    def createTable(self, TableName):
        """
        Creates the table if it does not exist yet; otherwise reports that it already exists.

        Parameters:
            TableName   = Name of the table to create
        """
        print('\nCreate a table with name - ' + TableName)

        if not self.TableService.exists(TableName):
            self.TableService.create_table(TableName)
            print("Table created successfully!")
        else:
            print('Error creating table, ' + TableName +
                  ' check if it already exists')

    def insertEntity(self, TableName, Entity):
        """
        Inserts an entity into the specified table.

        Parameters:
            TableName   = Name of the target table
            Entity      = Object with the entity to add
        """
        print('\nInserting a new entity into table - ' + TableName)
        self.TableService.insert_or_merge_entity(TableName, Entity)
        print('Successfully inserted the new entity')

    def getEntity(self, TableName, PartitionKey, RowKey):
        """
        Retrieves the full entity matching the PartitionKey and RowKey.

        Returns an object directly; there is no need to call json.loads().

        Parameters:
            TableName       = Name of the table to query
            PartitionKey    = Partition key of the desired entity
            RowKey          = Row key of the desired entity
        """
        print('\nGetting entity.')
        Entity = self.TableService.get_entity(TableName, PartitionKey, RowKey)
        return Entity

    def updateEntity(self, TableName, NewEntity):
        """
        Takes the object with the updated values and updates it in table storage.

        Parameters:
            TableName   = Name of the table to update
            NewEntity   = Object with the entity to update
        """
        print('\nUpdating entity. PK: ' + NewEntity.PartitionKey + '  RK: ' +
              NewEntity.RowKey)
        self.TableService.update_entity(TableName, NewEntity)

    def deleteEntity(self, TableName, PartitionKey, RowKey):
        """
        Deletes the entity matching the PartitionKey and RowKey.

        Parameters:
            TableName       = Name of the table
            PartitionKey    = Partition key of the entity
            RowKey          = Row key of the entity
        """
        print('\nDeleting entity')
        self.TableService.delete_entity(TableName, PartitionKey, RowKey)

    def deleteTable(self, TableName):
        """
        Checks whether the table exists and deletes it; otherwise reports that it does not exist.

        Parameters:
            TableName   = Name of the table to delete
        """
        print('\nDeleting the table.')
        if self.TableService.exists(TableName):
            self.TableService.delete_table(TableName)
            print('Successfully deleted the table')
        else:
            print('The table does not exist')
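
A quick usage sketch for the wrapper above; the connection string is a placeholder:

storage = TableStorage('DefaultEndpointsProtocol=https;AccountName=<account>;'
                       'AccountKey=<key>;EndpointSuffix=core.windows.net')
storage.CreateTableServices()
storage.createTable('devices')
storage.insertEntity('devices', {'PartitionKey': 'engine', 'RowKey': '1', 'Status': 'ok'})
entity = storage.getEntity('devices', 'engine', '1')
print(entity.Status)
storage.deleteEntity('devices', 'engine', '1')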
Example #5
import os
import urllib.request
import zipfile

from azure.storage.file import FileService
from azure.storage.table import TableService  # legacy azure-storage table SDK

STORAGE_ACCOUNT_NAME = os.environ['STORAGE_ACCOUNT_NAME']
STORAGE_ACCOUNT_KEY = os.environ['STORAGE_ACCOUNT_KEY']

table_service = TableService(account_name=STORAGE_ACCOUNT_NAME,
                             account_key=STORAGE_ACCOUNT_KEY)

table_service.create_table('cluster')

asset = {
    'PartitionKey': 'predictivemaintenance',
    'RowKey': 'predictivemaintenance',
    'Status': 'Not Created'
}
table_service.insert_or_merge_entity('cluster', asset)

file_service = FileService(account_name=STORAGE_ACCOUNT_NAME,
                           account_key=STORAGE_ACCOUNT_KEY)
file_service.create_share(share_name='azureml-project', quota=1)
file_service.create_share(share_name='azureml-share', quota=1)

source = os.environ['AML_ASSETS_URL']
dest = 'azureml_project.zip'

urllib.request.urlretrieve(source, dest)

with zipfile.ZipFile(dest, "r") as zip_ref:
    zip_ref.extractall("azureml-project")

for root, dirs, files in os.walk('azureml-project', topdown=True):
Example #6
import http.client
import json
import urllib.parse

# table_service is assumed to be a TableService client configured as in the
# previous examples; the Text Analytics subscription key below is a placeholder.
headers = {
    'Content-Type': 'application/json',
    'Ocp-Apim-Subscription-Key': '<text-analytics-subscription-key>',
}

params = urllib.parse.urlencode({})

controls = table_service.query_entities('wwcccs', filter=None)

for control in controls:
    id = control.PartitionKey + '-' + control.RowKey
    text = control.ControlDescription
    body = json.dumps({"documents": [{
        "id": id,
        "text": text,
    }]})
    try:
        conn = http.client.HTTPSConnection('westus.api.cognitive.microsoft.com')
        conn.request("POST", "/text/analytics/v2.0/keyPhrases?%s" % params,
                     body, headers)
        response = conn.getresponse()
        data = json.loads(response.read())
        documents = data['documents']
        kp = ", ".join(documents[0]['keyPhrases'])
        print(documents[0]['id'] + ": " + kp)
        updated_control = {
            'PartitionKey': control.PartitionKey,
            'RowKey': control.RowKey,
            'KeyPhrases': kp
        }
        table_service.insert_or_merge_entity('wwcccs', updated_control)
        conn.close()
    except Exception as e:
        print(id + ": " + str(e))