def get_keyvault_keys_and_secrets_metadata(
        keyvault_keys_and_secrets_metadata_path, keyvaults):
    """
    @keyvault_keys_and_secrets_metadata_path: string - path to output json file
    @keyvaults: list of keyvault dicts
    @returns: dict mapping vault name to its keys and secrets metadata
    """
    metadata = {}
    kvm_client = KeyVaultManagementClient(credentials, subscription_id)
    #kv_client = get_keyvault_client()
    for keyvault in keyvaults:
        subsciption_id, resource_group, vault_name = parse_vault_id(
            keyvault['id'])
        vault_url = 'https://{vault_name}.vault.azure.net'.format(
            vault_name=vault_name)
        print('vault_url', vault_url)
        metadata[vault_name] = {}
        try:
            keys = json.loads(
                utils.call(
                    "az keyvault key list --vault-name {}".format(vault_name)))
            metadata[vault_name]['keys'] = keys
            #keys = kv_client.get_keys(vault_url)
            #metadata[vault_name]['keys'] = get_list_from_paged_results(keys)
        except KeyVaultErrorException as e:
            if str(e.response) == '<Response [401]>':
                metadata[vault_name]['keys'] = "ERROR UNAUTHORIZED"
            else:
                print(traceback.format_exc())
                print(e.response)
                # kv_client is only defined when the SDK client path above is enabled
                #print(kv_client.config.credentials)
                raise Exception(
                    "Unexpected KeyVaultErrorException response for vault {}".
                    format(vault_url))
        try:
            secrets = json.loads(
                utils.call("az keyvault secret list --vault-name {}".format(
                    vault_name)))
            metadata[vault_name]['secrets'] = secrets
            #secrets = kv_client.get_secrets(vault_url)
            #metadata[vault_name]['secrets'] = get_list_from_paged_results(secrets)
        except KeyVaultErrorException as e:
            if str(e.response) == '<Response [401]>':
                metadata[vault_name]['secrets'] = "UNAUTHORIZED"
            else:
                print(traceback.format_exc())
                print(e.response)
                raise Exception("Unexpected KeyVaultErrorException response")
        except AzScannerException as e:
            print("Error calling command {}".format(e.message))
            print(traceback.format_exc())
    with open(keyvault_keys_and_secrets_metadata_path, 'w') as f:
        json.dump(metadata, f, indent=4, sort_keys=True)
    return metadata
def endpoint_protection_for_all_virtual_machines_is_installed_7_6(
        virtual_machines):
    items_flagged_list = []
    accepted_protections = set([
        'EndpointSecurity', 'TrendMicroDSA', 'Antimalware',
        'EndpointProtection', 'SCWPAgent', 'PortalProtectExtension',
        'FileSecurity', 'IaaSAntimalware'
    ])
    for vm in virtual_machines:
        name = vm['name']
        resource_group = vm['resourceGroup']
        extensions = json.loads(
            utils.call(
                "az vm extension list --vm-name {name} --resource-group {resource_group}"
                .format(name=name, resource_group=resource_group)))
        has_protection = False
        for extension in extensions:
            if extension['virtualMachineExtensionType'] in accepted_protections:
                has_protection = True
        if not has_protection:
            items_flagged_list.append((resource_group, name))
    stats = {
        'items_flagged': len(items_flagged_list),
        'items_checked': len(virtual_machines)
    }
    metadata = {
        "finding_name": "endpoint_protection_for_all_virtual_machines_is_installed",
        "negative_name": "",
        "columns": ["Resource Group", "Name"]
    }
    return {"items": items_flagged_list, "stats": stats, "metadata": metadata}
def get_network_flows(network_flows_path, network_security_groups):
    """
    @network_flows_path: string - path to output json file
    @network_security_groups: list of nsgs
    @returns: list of network flow dicts
    """
    network_flows = []
    for nsg in network_security_groups:
        resource_group = nsg['resourceGroup']
        nsg_id = nsg['id']
        try:
            network_flow = json.loads(
                utils.call(
                    "az network watcher flow-log show --resource-group {resource_group} --nsg {nsg_id}"
                    .format(resource_group=resource_group, nsg_id=nsg_id)))
            nsg_name = nsg["name"]
            network_flows.append({
                "resource_group": resource_group,
                "nsg_name": nsg_name,
                "network_flow": network_flow
            })
        except Exception as e:
            print(
                "Exception was thrown! Unable to get network watcher flows. Check permissions."
            )
            print(e)
            print(traceback.format_exc())
            print("Continuing without network flows data")
            break
    with open(network_flows_path, 'w') as f:
        json.dump(network_flows, f, indent=4, sort_keys=True)
    return network_flows
def get_virtual_machines(virtual_machines_path):
    """
    @virtual_machines_path: string - path to output json file
    @returns: list of virtual_machines dicts
    """
    virtual_machines = json.loads(utils.call("az vm list"))
    for vm in virtual_machines:
        name = vm['name']
        resource_group = vm['resourceGroup']
        encrypted = utils.call(
            "az vm encryption show --name {name} --resource-group {resource_group} --query dataDisk"
            .format(name=name, resource_group=resource_group))
        if encrypted in ["", "Azure Disk Encryption is not enabled"]:
            vm['storageProfile']['dataDisksEncrypted'] = False
        else:
            vm['storageProfile']['dataDisksEncrypted'] = True
        if vm["networkProfile"]:
            ifaces = vm["networkProfile"]["networkInterfaces"]
            if ifaces:
                for iface in ifaces:
                    nic_name = iface['id'].split('/')[-1]
                    try:
                        ifconfig = json.loads(
                            utils.call(
                                "az vm nic show -g {resource_group} --vm-name {vm_name} --nic {nic_name}"
                                .format(
                                    resource_group=resource_group,
                                    vm_name=name,
                                    nic_name=nic_name)))
                        iface.update(ifconfig)
                    except Exception as e:
                        print("az vm nic show failed")
                        print(traceback.format_exc())
                        logger.warning(traceback.format_exc())
        # extensions = json.loads(utils.call(
        #     "az vm extension list --vm-name {name} --resource-group {resource_group}".format(
        #         name=name, resource_group=resource_group)))
        # vm['extensions'] = extensions
    with open(virtual_machines_path, 'w') as f:
        json.dump(virtual_machines, f, indent=4, sort_keys=True)
    return virtual_machines
def get_network_watcher(network_watcher_path):
    """
    @network_watcher_path: string - path to output json file
    """
    network_watcher = json.loads(utils.call("az network watcher list"))
    with open(network_watcher_path, 'w') as f:
        json.dump(network_watcher, f, indent=4, sort_keys=True)
    return network_watcher
def get_network_security_groups(network_security_groups_path):
    """
    @network_security_groups_path: string - path to output json file
    """
    network_security_groups = json.loads(utils.call("az network nsg list"))
    with open(network_security_groups_path, 'w') as f:
        json.dump(network_security_groups, f, indent=4, sort_keys=True)
    return network_security_groups
def get_resource_ids_for_diagnostic_settings():
    resource_ids = []
    # Other resource_ids could be gathered. So far, only keyvault.
    keyvaults = json.loads(utils.call("az keyvault list"))
    for keyvault in keyvaults:
        resource_ids.append(keyvault['id'])
    with open(resource_ids_for_diagnostic_settings_path, 'w') as f:
        json.dump(resource_ids, f, indent=4, sort_keys=True)
    return resource_ids
def get_storage_accounts(storage_accounts_path):
    """
    Query the Azure API for storage account info and save it to disk
    """
    storage_accounts_cmd = "az storage account list"
    storage_accounts = json.loads(utils.call(storage_accounts_cmd))
    with open(storage_accounts_path, 'w') as f:
        json.dump(storage_accounts, f, indent=4, sort_keys=True)
    return storage_accounts
def get_monitor_diagnostic_settings(monitor_diagnostic_settings_path,
                                    resource_ids):
    """
    @monitor_diagnostic_settings_path: string - path to output json file
    @resource_ids: list of resource id strings
    @returns: dict mapping resource_id to its diagnostic settings
    """
    monitor_diagnostic_settings_results = {}
    for resource_id in resource_ids:
        monitor_diagnostic_settings = json.loads(
            utils.call(
                "az monitor diagnostic-settings list --resource {resource_id}"
                .format(resource_id=resource_id)))
        monitor_diagnostic_settings_results[resource_id] = monitor_diagnostic_settings
    with open(monitor_diagnostic_settings_path, 'w') as f:
        json.dump(
            monitor_diagnostic_settings_results, f, indent=4, sort_keys=True)
    return monitor_diagnostic_settings_results
def get_activity_logs(activity_logs_path, resource_groups):
    """
    @activity_logs_path: string - path to output json file
    @resource_groups: list of resource group dicts
    @returns: dict mapping resource group name to activity log entries
    """
    if os.path.exists(activity_logs_path):
        print("activity_logs_path {} exists, using existing values".format(
            activity_logs_path))
        with open(activity_logs_path, 'r') as f:
            return json.load(f)
    activity_logs = {}
    start_time = get_start_time(activity_logs_starttime_timedelta)
    for resource_group in resource_groups:
        resource_group = resource_group['name']
        activity_log = json.loads(
            utils.call(
                "az monitor activity-log list --resource-group {resource_group} --start-time {start_time}"
                .format(resource_group=resource_group, start_time=start_time)))
        activity_logs[resource_group] = activity_log
    with open(activity_logs_path, 'w') as f:
        json.dump(activity_logs, f, indent=4, sort_keys=True)
    return activity_logs
def public_access_level_is_set_to_private_for_blob_containers_3_6(
        storage_accounts):
    items_flagged_list = []
    items_checked = 0
    for account in storage_accounts:
        account_name = account["name"]
        resource_group = account["resourceGroup"]
        # get a key that works. likely this will be a specific key not key[0]
        keys_cmd = "az storage account keys list --account-name {account_name} --resource-group {resource_group}".format(
            account_name=account_name, resource_group=resource_group)
        keys = json.loads(utils.call(keys_cmd))
        account_key = keys[0]['value']
        try:
            container_list_cmd = "az storage container list --account-name {account_name} --account-key {account_key}".format(
                account_name=account_name, account_key=account_key)
            container_list = json.loads(utils.call(container_list_cmd))
            for container in container_list:
                print(container)
                items_checked += 1
                # publicAccess is a string ("blob" or "container") when public
                # and null when the container is private
                public_access = container["properties"]["publicAccess"]
                if public_access:
                    items_flagged_list.append((account_name, container))
        except Exception as e:
            print("ERROR: insufficient permissions to list containers")
            logger.warning(traceback.format_exc())
    stats = {
        'items_flagged': len(items_flagged_list),
        'items_checked': items_checked
    }
    metadata = {
        "finding_name": "public_access_level_is_set_to_private_for_blob_containers",
        "negative_name": "public_access_level_not_private_for_blob_containers",
        "columns": ["Storage Account Name", "Container"]
    }
    return {"items": items_flagged_list, "stats": stats, "metadata": metadata}
def get_resource_diagnostic_settings(resource_ids_for_diagnostic_settings):
    keyvault_settings_list = []
    for resource_id in resource_ids_for_diagnostic_settings:
        keyvault_settings = json.loads(
            utils.call(
                "az monitor diagnostic-settings list --resource {resource_id}"
                .format(resource_id=resource_id)))
        *prefix, resource_group, _, _, _, keyvault_name = resource_id.split('/')
        if not keyvault_settings['value']:
            keyvault_settings['value'].append({
                'keyvault_name': keyvault_name,
                'resourceGroup': resource_group
            })
        else:
            for setting in keyvault_settings['value']:
                setting['keyvault_name'] = keyvault_name
        print(keyvault_settings)
        keyvault_settings_list.append(keyvault_settings)
    with open(resource_diagnostic_settings_path, 'w') as f:
        yaml.dump(keyvault_settings_list, f)
    return resource_ids_for_diagnostic_settings
def only_approved_extensions_are_installed_7_4(virtual_machines):
    # items in the following list do not imply failure, but require review
    items_flagged_list = []
    approved_extensions = [
        'AzureDiskEncryption',
        'IaaSAntimalware',
        'IaaSDiagnostics',
        'MicrosoftMonitoringAgent',
        'SqlIaaSAgent',
        'OmsAgentForLinux',
        'VMAccessForLinux',
    ]
    for vm in virtual_machines:
        name = vm['name']
        resource_group = vm['resourceGroup']
        extensions = json.loads(
            utils.call(
                "az vm extension list --vm-name {name} --resource-group {resource_group}"
                .format(name=name, resource_group=resource_group)))
        # collect every non-approved extension attached to this VM
        extension_names = []
        for resource in vm.get("resources", []):
            extension_name = resource["id"].split('/')[-1]
            if extension_name not in approved_extensions:
                extension_names.append(extension_name)
        if extension_names:
            items_flagged_list.append(
                (resource_group, name, extension_names))
    stats = {
        'items_flagged': len(items_flagged_list),
        'items_checked': len(virtual_machines)
    }
    metadata = {
        "finding_name": "only_approved_extensions_are_installed",
        "negative_name": "",
        "columns": ["Resource Group", "VM Name", "Extension Name"]
    }
    return {"items": items_flagged_list, "stats": stats, "metadata": metadata}
def get_monitor_log_profiles(monitor_log_profiles_path):
    monitor_log_profiles = json.loads(
        utils.call("az monitor log-profiles list"))
    with open(monitor_log_profiles_path, 'w') as f:
        json.dump(monitor_log_profiles, f, indent=4, sort_keys=True)
    return monitor_log_profiles
def get_locked_resources():
    lock_list = json.loads(utils.call("az lock list"))
    with open(locked_resources_path, 'w') as f:
        json.dump(lock_list, f, indent=4, sort_keys=True)
    return lock_list
def get_sql_servers(sql_servers_path):
    sql_servers = json.loads(utils.call("az sql server list"))
    with open(sql_servers_path, 'w') as f:
        json.dump(sql_servers, f, indent=4, sort_keys=True)
    return sql_servers
def get_activity_log_alerts(activity_log_alerts_path):
    activity_log_alerts = json.loads(
        utils.call("az monitor activity-log alert list"))
    with open(activity_log_alerts_path, 'w') as f:
        json.dump(activity_log_alerts, f, indent=4, sort_keys=True)
    return activity_log_alerts