def set_connect(self, secret_data):
    """Initialize all Azure management clients from service-principal secrets.

    Args:
        secret_data (dict): Must contain 'subscription_id', 'tenant_id',
            'client_id' and 'client_secret'.
    """
    subscription_id = secret_data['subscription_id']
    # DefaultAzureCredential reads these via its EnvironmentCredential chain.
    os.environ["AZURE_SUBSCRIPTION_ID"] = subscription_id
    os.environ["AZURE_TENANT_ID"] = secret_data['tenant_id']
    os.environ["AZURE_CLIENT_ID"] = secret_data['client_id']
    os.environ["AZURE_CLIENT_SECRET"] = secret_data['client_secret']
    credential = DefaultAzureCredential()

    self.compute_client = ComputeManagementClient(
        credential=credential, subscription_id=subscription_id)
    # FIX: the original passed expand='instanceView' to the client
    # constructor, but 'expand' is a parameter of the list/get operations
    # (e.g. virtual_machines.get), not of ComputeManagementClient, so it
    # had no effect there. Pass expand at call time instead.
    self.vm_compute_client = ComputeManagementClient(
        credential=credential, subscription_id=subscription_id)
    self.resource_client = ResourceManagementClient(
        credential=credential, subscription_id=subscription_id)
    self.network_client = NetworkManagementClient(
        credential=credential, subscription_id=subscription_id)
    self.subscription_client: SubscriptionClient = SubscriptionClient(
        credential=credential)
    self.sql_client = SqlManagementClient(
        credential=credential, subscription_id=subscription_id)
    self.monitor_client = MonitorManagementClient(
        credential=credential, subscription_id=subscription_id)
def _get_profiles(self, attribute_type, sub_index, sub):
    """Yield Azure monitor records for a single subscription.

    Arguments:
        attribute_type (str): Attribute type name.
        sub_index (int): Subscription index (for logging only).
        sub (Subscription): Azure subscription object.

    Yields:
        dict: An Azure monitor record.
    """
    _log.info('Working on %s',
              util.outline_az_sub(sub_index, sub, self._tenant))
    try:
        client = MonitorManagementClient(self._credentials,
                                         sub.get('subscription_id'))
        records = _get_attribute_iterator(attribute_type, client,
                                          sub, sub_index, self._tenant)
        yield from _get_record(records, attribute_type, self._max_recs,
                               sub_index, sub, self._tenant)
    except Exception as e:
        _log.error('Failed to fetch details for %s; %s; error: %s: %s',
                   attribute_type,
                   util.outline_az_sub(sub_index, sub, self._tenant),
                   type(e).__name__, e)
def azure_connect_service(service, credentials, region_name=None):
    """Return a management client for the requested Azure service, or None."""
    # Service name -> client class; several names map to the network client.
    factories = {
        'storageaccounts': StorageManagementClient,
        'monitor': MonitorManagementClient,
        'sqldatabase': SqlManagementClient,
        'keyvault': KeyVaultManagementClient,
        'appgateway': NetworkManagementClient,
        'network': NetworkManagementClient,
        'rediscache': RedisManagementClient,
        'appservice': WebSiteManagementClient,
        'loadbalancer': NetworkManagementClient,
    }
    try:
        if service == 'securitycenter':
            # SecurityCenter takes an additional (empty) location argument.
            return SecurityCenter(credentials.credentials,
                                  credentials.subscription_id, '')
        factory = factories.get(service)
        if factory is None:
            printException('Service %s not supported' % service)
            return None
        return factory(credentials.credentials, credentials.subscription_id)
    except Exception as e:
        printException(e)
        return None
def get_subscription_info(credential):
    """Return [subscription_id, MonitorManagementClient] pairs for every
    subscription visible to *credential*."""
    info = []
    subscriptions = SubscriptionClient(credential=credential)
    for sub in subscriptions.subscriptions.list():
        monitor = MonitorManagementClient(
            credential=credential, subscription_id=sub.subscription_id)
        info.append([sub.subscription_id, monitor])
    return info
async def _get_and_set_activity_logs(self, storage_account, subscription_id: str):
    """Fetch up to 90 days of activity logs for *storage_account* and attach
    them as its `activity_logs` attribute (empty list on failure)."""
    client = MonitorManagementClient(self.credentials.arm_credentials,
                                     subscription_id)
    # Time format used by Azure API:
    time_format = "%Y-%m-%dT%H:%M:%S.%f"
    # Azure API uses UTC time, we need to use the same to avoid bad requests:
    utc_now = datetime.datetime.utcnow()
    # Activity logs are only archived for a period of 90 days max (requesting
    # a timespan of more than that ends up with a bad request):
    timespan = datetime.timedelta(90)
    start = (utc_now - timespan).strftime(time_format)
    end = utc_now.strftime(time_format)
    logs_filter = ("eventTimestamp ge {}".format(start)
                   + " and eventTimestamp le {}".format(end)
                   + " and resourceId eq {}".format(storage_account.id))
    try:
        fetch = lambda: list(client.activity_logs.list(
            filter=logs_filter, select="eventTimestamp, operationName"))
        activity_logs = await run_concurrently(fetch)
    except Exception as e:
        print_exception('Failed to retrieve activity logs: {}'.format(e))
        setattr(storage_account, 'activity_logs', [])
    else:
        setattr(storage_account, 'activity_logs', activity_logs)
def get_all_webapps(credentials, rgs):
    """Collect security-relevant settings for every web app in the given
    {subscription: [resource_group, ...]} mapping; returns a JSON string."""
    collected = []
    for sub, groups in rgs.items():
        web_client = WebSiteManagementClient(credentials, sub)
        monitor_client = MonitorManagementClient(credentials, sub)
        for rg in groups:
            for site in web_client.web_apps.list_by_resource_group(rg):
                config = web_client.web_apps.get_configuration(rg, site.name)
                resource_id = (
                    f"/subscriptions/{sub}/resourceGroups/{rg}"
                    f"/providers/Microsoft.Web/sites/{site.name}")
                # True when a diagnostic setting with the hardcoded name
                # exists; otherwise the sentinel string is kept.
                audit_logs = "No Audit Logs"
                try:
                    monitor_client.diagnostic_settings.get(
                        resource_id,
                        "diagnostic-log-name",  # Place the name of your diagnostic log here!
                    )
                    audit_logs = True
                except ClientException:
                    pass
                collected.append({
                    "Subscription": sub,
                    "Resource Group": rg,
                    "App Service Name": site.name,
                    "HTTPS_ONLY": site.https_only,
                    "FTPS": config.ftps_state,
                    "TLS": config.min_tls_version,
                    "Always-On": config.always_on,
                    "Audit Logs": audit_logs,
                    "Kind": site.kind,
                    "Location": site.location,
                })
    return json.dumps(collected)
def getMeasurement(measurement):
    """Return the last minute's 'Total' value of *measurement* for the VM
    configured in clientConfig.

    Args:
        measurement (str): Azure Monitor metric name (e.g. 'Percentage CPU').

    Returns:
        dict | None: VM name, resource group, metric name and the first
        datapoint's total; None when the API returned no datapoints.
    """
    credentials = ServicePrincipalCredentials(
        client_id=clientConfig.CLIENT,
        secret=clientConfig.KEY,
        tenant=clientConfig.TENANT_ID,
    )

    # Build the ARM id of the VM directly rather than doing a "get" via the
    # compute management client.
    subscription_id = clientConfig.subscription_id
    resource_group_name = clientConfig.resource_group_name
    vm_name = clientConfig.vm_name
    resource_id = (
        "subscriptions/{}/"
        "resourceGroups/{}/"
        "providers/Microsoft.Compute/virtualMachines/{}"
    ).format(subscription_id, resource_group_name, vm_name)

    client = MonitorManagementClient(credentials, subscription_id)

    # FIX: Azure Monitor expects UTC timestamps; the original used the local
    # datetime.datetime.now(), which skews the one-minute window on any
    # non-UTC host (consistent with the UTC handling elsewhere in this code).
    now = datetime.datetime.utcnow()
    lastminute = now - datetime.timedelta(minutes=1)
    metrics_data = client.metrics.list(
        resource_uri=resource_id,
        timespan="{}/{}".format(lastminute, now),
        interval='PT1M',
        metricnames=measurement,
        aggregation='Total',
    )

    # Return the first datapoint found (a one-minute PT1M window yields at
    # most one); falls through to an implicit None when there is no data.
    for item in metrics_data.value:  # azure.mgmt.monitor.models.Metric
        for timeserie in item.timeseries:
            for data in timeserie.data:  # azure.mgmt.monitor.models.MetricData
                return {
                    "VM": vm_name,
                    "resource_group_name": resource_group_name,
                    "Measurement": measurement,
                    "data": data.total,
                }
def __init__(self, az_account: 'account.AZAccount') -> None:
    """Initialize the Azure monitoring class.

    Args:
        az_account (AZAccount): An Azure account object.
    """
    creds = az_account.credentials
    subscription = az_account.subscription_id
    self.monitoring_client = MonitorManagementClient(creds, subscription)
def __init__(self, credentials, subscription_id, email_client):
    """Store ARM clients (resource, monitor, compute) plus an email client."""
    self.subscription_id = subscription_id
    self.email_client = email_client
    self.resource_client = ResourceManagementClient(credentials,
                                                    subscription_id)
    self.monitor_client = MonitorManagementClient(credentials,
                                                  subscription_id)
    self.compute_client = ComputeManagementClient(credentials,
                                                  subscription_id)
def client(self):
    """Initializes a new monitoring client for Azure services.

    Returns:
        MonitorManagementClient: the freshly constructed client (also cached
        on ``self._client``).
    """
    # FIX: removed the dead store `self._client = None` that was immediately
    # overwritten by the assignment below.
    self._client = MonitorManagementClient(self.credentials,
                                           self.subscription_id)
    return self._client
def get_metric_data_capacity(resource_group_name, storage_account_name,
                             subscription_id, type):
    """Return yesterday's latest hourly average capacity for a storage
    account's blob or file service.

    Args:
        resource_group_name (str): Resource group of the storage account.
        storage_account_name (str): Storage account name.
        subscription_id (str): Azure subscription id.
        type (Metric_type): blob_capacity or fileshare_capacity. (The name
            shadows the builtin but is kept for interface compatibility.)

    Returns:
        The most recent non-None hourly average, or METRICS_NOT_AVAILABLE.
    """
    client = MonitorManagementClient(credentials, subscription_id)
    today = datetime.datetime.utcnow().date()
    yesterday = today - datetime.timedelta(days=1)
    resource_id = ("subscriptions/{}/"
                   "resourceGroups/{}/"
                   "providers/Microsoft.Storage/storageAccounts/{}/{}")

    # Map the metric type to the sub-service path and metric name so both
    # branches share one metrics.list call.
    # NOTE(review): Azure's metric ids are 'BlobCapacity'/'FileCapacity';
    # confirm the display names with spaces are accepted — TODO.
    if type == Metric_type.blob_capacity:
        service_path, metric_name = 'blobServices/default', 'Blob capacity'
    elif type == Metric_type.fileshare_capacity:
        service_path, metric_name = 'fileServices/default', 'File capacity'
    else:
        # FIX: an unknown type left metrics_data as None and the original
        # crashed with AttributeError on metrics_data.value; fail soft.
        return METRICS_NOT_AVAILABLE

    resource_id = resource_id.format(subscription_id, resource_group_name,
                                     storage_account_name, service_path)
    # FIX: the metric name was passed as `metric=`; the SDK (and every other
    # call in this codebase) uses `metricnames=`, so the filter was not
    # being applied.
    metrics_data = client.metrics.list(
        resource_id,
        timespan="{}/{}".format(yesterday, today),
        interval='PT1H',
        metricnames=metric_name,
        aggregation='Average')

    if metrics_data.value is None:
        return METRICS_NOT_AVAILABLE
    for metric in metrics_data.value:
        # FIX: inner loop variable no longer shadows the outer one.
        for series in metric.timeseries:
            if len(series.data) > 0:
                data = series.data[-1]
                if data.average is not None:
                    return data.average
                return METRICS_NOT_AVAILABLE
    return METRICS_NOT_AVAILABLE
def init() -> MonitorManagementClient:
    """Build a MonitorManagementClient from the stored secrets, subscription
    and cloud resource-manager endpoint."""
    secrets = load_secrets()
    subscription_id = load_subscription_id()
    # Point the client at the configured cloud's resource manager.
    base_url = secrets.get('cloud').endpoints.resource_manager
    with auth(secrets) as credentials:
        return MonitorManagementClient(credential=credentials,
                                       subscription_id=subscription_id,
                                       base_url=base_url)
def __init__(self, creds, subscription_id, resource_group, provider,
             resource_name, base_url=None):
    """Bind a MonitorManagementClient to a single ARM resource."""
    self.logger = logging.getLogger()
    self.client = MonitorManagementClient(creds, subscription_id, base_url)
    self.subscription_id = subscription_id
    self.provider = provider
    # Fully-qualified ARM id of the monitored resource.
    self.resource_id = ("/subscriptions/{0}/resourceGroups/{1}"
                        "/providers/{2}/{3}").format(subscription_id,
                                                     resource_group,
                                                     provider,
                                                     resource_name)
def __init__(self):
    # Service-principal credentials built from module-level constants.
    # NOTE(review): this object is created but never used in this method —
    # all clients below are built from self.cr instead.
    self.credentials = ServicePrincipalCredentials(
        client_id=CLIENT_ID,
        secret=SECRET,
        tenant=TENANT,
    )
    # SECURITY(review): a username/password pair is hard-coded here in plain
    # text; move it to secret storage and rotate the password.
    self.cr = UserPassCredentials('*****@*****.**', 'Roshan@123')
    # All management clients share the user/password credential and the
    # module-level subscription_id.
    self.monitor_client = MonitorManagementClient(self.cr, subscription_id)
    self.compute_client = ComputeManagementClient(self.cr, subscription_id)
    self.network_client = NetworkManagementClient(self.cr, subscription_id)
    self.resource_client = ResourceManagementClient(
        self.cr, subscription_id)
def setup(self):
    """Prepare a MonitorManagementClient pointed at a fixed test VM."""
    SUBSCRIPTION_ID = '2f50f202-0a84-4c8c-a929-fcc5a3174590'
    GROUP_NAME = 'OmkarVmPlzDoNotRemove'
    LOCATION = 'West US'
    VM_NAME = 'OmkarVmPlzDoNotRemoveThis'
    # ARM id of the virtual machine under test.
    resource_id = (
        "subscriptions/{}/"
        "resourceGroups/{}/"
        "providers/Microsoft.Compute/virtualMachines/{}"
    ).format(SUBSCRIPTION_ID, GROUP_NAME, VM_NAME)
    credentials = get_credentials()
    client = MonitorManagementClient(credentials, SUBSCRIPTION_ID)
def azure_connect_service(service, credentials, region_name=None):
    """Return a management client for *service*; None when unsupported or
    on any error (reported via printException)."""
    try:
        if service == 'storageaccounts':
            return StorageManagementClient(credentials.credentials,
                                           credentials.subscription_id)
        if service == 'monitor':
            return MonitorManagementClient(credentials.credentials,
                                           credentials.subscription_id)
        printException('Service %s not supported' % service)
        return None
    except Exception as e:
        printException(e)
        return None
def get_final_list():
    """
    This function retrieves the aggregated metric values for every resourceID
    and appends it to a list.
    :return: list of dictionaries
    """
    final_list = []
    start_time = datetime.datetime.now() - datetime.timedelta(hours=24)
    end_time = datetime.datetime.now()
    for entry in get_subID_resourceID_list():
        sub_id = entry['subscriptionID']
        resource_id = entry['resourceID']
        monitor_client = MonitorManagementClient(credentials=credentials,
                                                 subscription_id=sub_id)
        for metric in metrics:
            metrics_data = monitor_client.metrics.list(
                resource_id,
                timespan="{}/{}".format(start_time, end_time),
                interval='PT1H',
                metricnames=metric,
                aggregation='Minimum, Maximum, Average')
            for measure in metrics_data.value:
                for series in measure.timeseries:
                    for point in series.data:
                        final_list.append({
                            'subscription': sub_id,
                            'resource': resource_id,
                            'metricType': metric,
                            'timestamp': str('{}'.format(point.time_stamp)),
                            'unit': measure.unit.name,
                            'minimum': point.minimum,
                            'maximum': point.maximum,
                            'average': point.average})
    return final_list
from config.config import queueConf, DATABASE_URI, ACI_CONFIG, azure_context
from azure.servicebus import ServiceBusService, Message, Queue
from azure.mgmt.monitor import MonitorManagementClient
from flask import Flask, render_template, request, Response
import json
import sys
from pymongo import MongoClient
from bson.json_util import dumps
import requests
import random
import traceback

# TODO: Use Azure's sentiment analysis tool to perform everything

# The monitor client to get container group metrics
monitor_client = MonitorManagementClient(azure_context.credentials,
                                         azure_context.subscription_id)

# Set up the service bus queue
bus_service = ServiceBusService(
    service_namespace=queueConf['service_namespace'],
    shared_access_key_name=queueConf['saskey_name'],
    shared_access_key_value=queueConf['saskey_value'])

# Connect to the databases.
# NOTE(review): appending '&ssl=true' assumes DATABASE_URI already contains
# a query string — confirm.
client = MongoClient(DATABASE_URI + "&ssl=true")
db = client.containerstate

# Preset responses
SUCCESS = Response(json.dumps({'success': True}), status=200,
                   mimetype='application/json')
resource_group_name = resource_group_match.group(1) authorization_rule_keys = sb_client.namespaces.list_keys( resource_group_name=resource_group_name, namespace_name=sb.name, authorization_rule_name='RootManageSharedAccessKey') conn_str = authorization_rule_keys.primary_connection_string sb_admin_client = ServiceBusAdministrationClient.from_connection_string( conn_str) all_queues = [ queue.name for queue in sb_admin_client.list_queues() ] # create client monitor_client = MonitorManagementClient(DefaultAzureCredential(), subscription_id) # Create action group action_group = monitor_client.action_groups.create_or_update( resource_group_name, SHORT_APP_NAME, { "location": "Global", "group_short_name": SHORT_APP_NAME, "enabled": True, #"email_receivers": [ # { # "name": "personalEmail",
tenant='2cc639d3-af51-4877-9056-5b92bafdf00d' ) resource_group_name = 'mitiscctest' app_name = 'mitiscctest' subscription_id = '312c8cd6-c19b-48f1-9004-ee9e83fd18ef' resource_id_web = ( "subscriptions/{}/" "resourceGroups/{}/" "providers/Microsoft.Web/sites/{}" ).format(subscription_id, resource_group_name, app_name) resource_id_serverfarms = "/subscriptions/312c8cd6-c19b-48f1-9004-ee9e83fd18ef/resourceGroups/mitiscctest/providers/Microsoft.Web/serverFarms/mititestserviceplan" resource_id_databases = "/resourceGroups/mitiscctest/providers/Microsoft.Sql/servers/mititestserver/databases/minitestdatabases" # create client client = MonitorManagementClient( credentials, subscription_id ) # create client client = MonitorManagementClient( credentials, subscription_id ) today = datetime.datetime.now().date() todays = str(today)+'T00:00:00Z' yesterday = today - datetime.timedelta(hours=12) week = str(today - datetime.timedelta(hours=336))+'T00:00:00Z' timespan = week + '/' + todays print(timespan)
secret = CLIENT_SECRET, tenant = TENANT_ID ) # Get the ARM id of your resource. You might chose to do a "get" # using the according management or to build the URL directly # Example for a ARM VM resource_id = ( "subscriptions/{}/" "resourceGroups/{}/" "providers/Microsoft.Compute/virtualMachines/{}" ).format(SUBSCRIPTION_ID, <RESOURCE_GRP_NAME>, <VM NAME>) # create client client = MonitorManagementClient( credentials, SUBSCRIPTION_ID ) # You can get the available metrics of this specific resource for metric in client.metric_definitions.list(resource_id): # azure.monitor.models.MetricDefinition print("{}: id={}, unit={}".format( metric.name.localized_value, metric.name.value, metric.unit )) # Example of result for a VM: # Percentage CPU: id=Percentage CPU, unit=Unit.percent # Network In: id=Network In, unit=Unit.bytes # Network Out: id=Network Out, unit=Unit.bytes
from azure.mgmt.monitor.models import ScaleRule from azure.mgmt.monitor.models import MetricTrigger from azure.mgmt.monitor.models import ActivityLogAlertResource from azure.mgmt.monitor.models import ActivityLogAlertAllOfCondition from azure.mgmt.monitor.models import ActivityLogAlertLeafCondition subscriptionId = os.getenv('WEBSITE_OWNER_NAME').split('+')[0] credentials = CredentialWrapper() computeclient = ComputeManagementClient(credentials, subscriptionId) networkClient = NetworkManagementClient(credentials, subscriptionId) auth_client = AuthorizationManagementClient(credentials, subscriptionId) kv_client = KeyVaultManagementClient(credentials, subscriptionId) resource_client = ResourceManagementClient(credentials, subscriptionId) storage_client = StorageManagementClient(credentials, subscriptionId) monitor_client = MonitorManagementClient(credentials, subscriptionId) def get_storage_account_keys(resource_group, storage_account): try: storage_acc_info = storage_client.storage_accounts.list_keys( resource_group, storage_account) for keys in storage_acc_info.keys: return keys.value except Exception as ex: print('Exception:') print(ex) sys.exit(1) def get_sas_token(resourceGroupName, storageAccount):
def azure_dbs():
    """Flask view: show PostgreSQL-server CPU metrics pulled from Azure
    Monitor for a database selected via POST."""
    tenant_id = config.TENANT
    application_id = config.CLIENT_ID_JENKINS
    application_secret = config.CLIENT_SECRET_JENKINS
    subscription_id = config.SUBSCRIPTION_ID
    credentials = ServicePrincipalCredentials(client_id=application_id,
                                              secret=application_secret,
                                              tenant=tenant_id)
    resource_client = ResourceManagementClient(credentials, subscription_id)
    sql_client = PostgreSQLManagementClient(credentials, subscription_id)
    monitor_client = MonitorManagementClient(credentials, subscription_id)
    # NOTE(review): `now` is a time-of-day (.time()) while day_ago is a full
    # datetime; the timespan string below mixes the two — confirm the API
    # accepts this format.
    now = datetime.datetime.now().time()
    hour_ago = datetime.datetime.now() - datetime.timedelta(hours=1)
    day_ago = datetime.datetime.now() - datetime.timedelta(days=1)
    # servers.list() returns an iterator; two independent listings are made.
    databases = sql_client.servers.list()
    resources = sql_client.servers.list()

    def get_metric(resourceid, metric, aggregation):
        # Fetch 15-minute-interval datapoints over the last day.
        metric_data = monitor_client.metrics.list(resource_uri=resourceid,
                                                  timespan="{}/{}".format(
                                                      day_ago, now),
                                                  interval='PT15M',
                                                  metricnames=metric,
                                                  aggregation=aggregation)
        return metric_data

    def get_timeseries_current_value(metric_data):
        # Walks all datapoints and keeps the last one's average.
        x_values, y_values = [], []
        for item in metric_data.value:
            for timeserie in item.timeseries:
                for data in timeserie.data:
                    current_value = data.average
        return current_value

    def get_timeseries_avg_data(metric_data):
        # NOTE(review): returns `metric_name`, which is not defined anywhere
        # in this function — calling this raises NameError. Currently only
        # referenced from the commented-out code below.
        x_values, y_values = [], []
        for item in metric_data.value:
            for timeserie in item.timeseries:
                for data in timeserie.data:
                    x_metrics, y_metrics = [], []
                    x_values.append("".join(str(data.average)))
                    y_values.append("".join(str(data.time_stamp)))
                    x_metrics.extend(x_values)
                    y_metrics.extend(y_values)
        return x_metrics, y_metrics, metric_name

    # for database in databases:
    #     resourceid = database.id
    #     cpu_usage = get_metric(resourceid,'cpu_percent','Average')
    #     # storage_usage = get_metric(resourceid,'storage_percent','Average')
    #     # cpu_metrics = get_timeseries_avg_data(cpu_usage, 10, 'CPU percent (Avg)')
    #     # storage_metrics = get_timeseries_avg_data(storage_usage, 10, 'Storage percent (Avg)')

    if request.method == 'POST':
        requested_database = request.form["databases"]
        cpu_usage = get_metric(requested_database, 'cpu_percent', 'Average')
        current_value = get_timeseries_current_value(cpu_usage)
        message = current_value
        flash(message)
    return render_template('az_services/databases.html', databases=databases)
TENANT_ID = common()["TENANT_ID"] RESOURCE_GROUP_NAME = common()["RESOURCE_GROUP_NAME"] VM_NAMES_01 = vm()["VM_NAMES_01"] VM_NAMES_02 = vm()["VM_NAMES_02"] resource_id = ("subscriptions/{}/" "resourceGroups/{}/" "providers/Microsoft.Compute/virtualMachines/{}").format( SUBSCRIPTION_ID, RESOURCE_GROUP_NAME, VM_NAMES_02) credentials = ServicePrincipalCredentials(client_id=CLIENT_ID, secret=SECRET, tenant=TENANT_ID) metrics_client = MonitorManagementClient(credentials, SUBSCRIPTION_ID) today = datetime.datetime.now().date() tomorrow = today + datetime.timedelta(days=1) if __name__ == '__main__': metrics_data_cpu = metrics_client.metrics.list( resource_id, timespan="{}/{}".format(today, tomorrow), interval='PT1H', metricnames='Percentage CPU', aggregation='Total') metrics_data_hard_drive = metrics_client.metrics.list( resource_id, timespan="{}/{}".format(today, tomorrow),
def get_monitor_client() -> MonitorManagementClient:
    """Construct a MonitorManagementClient from the ambient identity and
    subscription helpers."""
    identity = get_identity()
    subscription = get_subscription()
    return MonitorManagementClient(identity, subscription)
lt_50 = "False" # Iterate all vms and export data utilization to CSV. with open('/home/yahav/cpu_memory_utilization_average.csv', 'a') as file: field_names = [ 'Resource id', 'Average CPU', 'Maximum CPU', 'Average Memory', 'Maximum Memory', 'Total Memory(MB)', 'Vm Size', 'Region', 'LT 50%' ] writer = csv.DictWriter(file, fieldnames=field_names) writer.writeheader() for sub in list(subscription_ids): compute_client = ComputeManagementClient( credential, subscription_id=sub.subscription_id) monitor_client = MonitorManagementClient( credential, subscription_id=sub.subscription_id) resource_client = ResourceManagementClient( credential, subscription_id=sub.subscription_id) # resource_group_client = ResourceManagementClient(credential, subscription_id=sub.subscription_id) vm_list = compute_client.virtual_machines.list_all() # rg_list = resource_group_client.resource_groups.list() # for rg in rg_list: for vm in list(vm_list): generalized_vms = compute_client.virtual_machines.get( resource_group_name=vm.id.split('/')[4], vm_name=vm.name, expand='instanceView') generalized = generalized_vms.instance_view.statuses # Exclude vms that in "OSState/generalized" state. if generalized[0].code == "OSState/generalized":