def store_data_record(self):
    """
    Creates the indexes on collection and stores the data record in the
    database, or creates the generated snapshot at the file system.

    Builds the record via ``self.create_database_record()``, then:
      * DB mode (``get_dbtests()`` true): creates lookup indexes on the
        target collection the first time it is used, then inserts the record.
      * FS mode: writes the record into the container's snapshots directory.
    Finally records the per-node outcome in ``self.snapshot_data`` and marks
    the node active.
    """
    data_record = self.create_database_record()
    if get_dbtests():
        # First record in this collection: create the lookup indexes once.
        if get_collection_size(data_record['collection']) == 0:
            # Creating indexes for collection
            create_indexes(
                data_record['collection'],
                config_value(DATABASE, DBNAME),
                [('snapshotId', pymongo.ASCENDING),
                 ('timestamp', pymongo.DESCENDING)])
            create_indexes(
                data_record['collection'],
                config_value(DATABASE, DBNAME),
                [('_id', pymongo.DESCENDING),
                 ('timestamp', pymongo.DESCENDING),
                 ('snapshotId', pymongo.ASCENDING)])
        insert_one_document(data_record, data_record['collection'], self.dbname, check_keys=False)
    else:
        # No database configured: persist the record as a file-system snapshot.
        snapshot_dir = make_snapshots_dir(self.container)
        if snapshot_dir:
            store_snapshot(snapshot_dir, data_record)
    if 'masterSnapshotId' in self.node:
        # Crawled (generated) node: remember which master snapshot produced it.
        self.snapshot_data[
            self.node['snapshotId']] = self.node['masterSnapshotId']
    else:
        # Plain node: True on success, False when the record carries an error.
        self.snapshot_data[self.node['snapshotId']] = False if (
            'error' in data_record and data_record['error']) else True
    self.node['status'] = 'active'
def store_data_node(self, data): """Store the data in the filesystem""" # Make a snapshots directory if DB is NONW snapshot_dir = make_snapshots_dir(self.container) if snapshot_dir: store_snapshot(snapshot_dir, data)
def populate_kubernetes_snapshot(snapshot, container=None):
    """
    Populate snapshot data for kubernetes nodes.

    For each node carrying a 'snapshotId', fetches the kubernetes resource
    data, wraps it in a snapshot template, and stores it (DB and/or file
    system).  Nodes carrying only a 'masterSnapshotId' are handed to the
    crawler.  Returns the snapshot_data mapping of node ids to outcomes.
    """
    snapshot_nodes = get_field_value(snapshot, 'nodes')
    snapshot_data, valid_snapshotids = validate_snapshot_nodes(snapshot_nodes)
    dbname = config_value('MONGODB', 'dbname')
    if valid_snapshotids and snapshot_nodes:
        logger.debug(valid_snapshotids)
        try:
            for node in snapshot_nodes:
                # Nodes opt out of validation with "validate": false.
                validate = node['validate'] if 'validate' in node else True
                logger.info(node)
                if 'snapshotId' in node:
                    if validate:
                        kubernetes_snapshot_data = get_kubernetes_snapshot_data(snapshot, node)
                        if kubernetes_snapshot_data:
                            # Pull the error (if any) out of the payload before templating.
                            error_str = kubernetes_snapshot_data.pop('error', None)
                            kubernetes_snapshot_template = make_kubernetes_snapshot_template(
                                snapshot, node, kubernetes_snapshot_data
                            )
                            if get_dbtests():
                                # First record in this collection: create indexes once.
                                if get_collection_size(kubernetes_snapshot_template['collection']) == 0:
                                    # Creating indexes for collection
                                    create_indexes(
                                        kubernetes_snapshot_template['collection'],
                                        config_value(DATABASE, DBNAME),
                                        [
                                            ('snapshotId', pymongo.ASCENDING),
                                            ('timestamp', pymongo.DESCENDING)
                                        ]
                                    )
                                    create_indexes(
                                        kubernetes_snapshot_template['collection'],
                                        config_value(DATABASE, DBNAME),
                                        [
                                            ('_id', pymongo.DESCENDING),
                                            ('timestamp', pymongo.DESCENDING),
                                            ('snapshotId', pymongo.ASCENDING)
                                        ]
                                    )
                                insert_one_document(kubernetes_snapshot_template, kubernetes_snapshot_template['collection'], dbname, check_keys=False)
                            # NOTE(review): unlike the other populate_* helpers there is no
                            # `else:` here, so the file-system snapshot is written even when
                            # the record was inserted into the DB — confirm this is intended.
                            snapshot_dir = make_snapshots_dir(container)
                            if snapshot_dir:
                                store_snapshot(snapshot_dir, kubernetes_snapshot_template)
                            if "masterSnapshotId" in node:
                                snapshot_data[node['snapshotId']] = node['masterSnapshotId']
                            elif "snapshotId" in node:
                                # NOTE(review): always true here (outer branch already
                                # checked 'snapshotId' in node) — effectively an `else`.
                                snapshot_data[node['snapshotId']] = False if error_str else True
                    else:
                        node['status'] = 'inactive'
                elif 'masterSnapshotId' in node:
                    # Master node: delegate to the crawler to generate child snapshots.
                    snapshot_data = generate_crawler_snapshot(snapshot, node, snapshot_data)
        except Exception as ex:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            logger.error('can not connect to kubernetes cluster: %s ', ex)
            logger.error('\t ERROR INFO : \n \tfile name : %s\n \tline : %s\n \ttype : %s\n \tobject : %s', fname, exc_tb.tb_lineno, exc_type, exc_obj)
            print(traceback.format_exc())
            raise ex
    return snapshot_data
def populate_custom_snapshot(snapshot, container=None):
    """ Populates the resources from git.

    NOTE(review): this function is re-defined later in this file; at import
    time the later definition wins — confirm which one is intended to be live.

    Resolves the connector (current-data connector if set, otherwise the
    snapshot 'source'), clones/locates the repo, and processes each node:
    template nodes are dispatched to their TEMPLATE_NODE_TYPES processor,
    plain nodes are fetched with get_node/get_all_nodes and stored.
    """
    dbname = config_value('MONGODB', 'dbname')
    snapshot_source = get_field_value(snapshot, 'source')
    connector_data = get_from_currentdata('connector')
    if connector_data:
        sub_data = get_custom_data(connector_data)
        if not sub_data:
            logger.error("No connector data found in '%s'", connector_data)
    else:
        sub_data = get_custom_data(snapshot_source)
    snapshot_nodes = get_field_value(snapshot, 'nodes')
    snapshot_data, valid_snapshotids = validate_snapshot_nodes(snapshot_nodes)
    if valid_snapshotids and sub_data and snapshot_nodes:
        baserepo, repopath = _get_repo_path(sub_data, snapshot)
        if repopath:
            brnch = get_field_value_with_default(sub_data, 'branchName', 'master')
            for node in snapshot_nodes:
                node_type = node['type'] if 'type' in node and node['type'] else ''
                if node_type in TEMPLATE_NODE_TYPES:
                    # Template node (e.g. IaC templates): delegate to the
                    # registered processor for this node type.
                    template_data = {
                        "container": container,
                        "dbname": dbname,
                        "snapshot_source": snapshot_source,
                        "connector_data": sub_data,
                        "snapshot_data": snapshot_data,
                        "repopath": repopath,
                        "snapshot": snapshot
                    }
                    template_processor = TEMPLATE_NODE_TYPES[node_type](node, **template_data)
                    if 'snapshotId' in node:
                        snapshot_data = template_processor.populate_template_snapshot()
                    elif 'masterSnapshotId' in node:
                        snapshot_data = template_processor.populate_all_template_snapshot()
                elif 'paths' in node:
                    # Multi-path node whose type has no registered processor.
                    logger.error("ERROR: Invalid json : `%s` is not a valid node type." % (node_type))
                else:
                    # logger.debug(node)
                    # data = get_node(repopath, node, snapshot_source, brnch)
                    # if data:
                    #     insert_one_document(data, data['collection'], dbname)
                    # snapshot_data[node['snapshotId']] = True
                    validate = node['validate'] if 'validate' in node else True
                    if 'snapshotId' in node:
                        logger.debug(node)
                        data = get_node(repopath, node, snapshot, brnch, sub_data)
                        if data:
                            if validate:
                                if get_dbtests():
                                    # First record in this collection: create indexes once.
                                    if get_collection_size(data['collection']) == 0:
                                        # Creating indexes for collection
                                        create_indexes(data['collection'],
                                                       config_value(DATABASE, DBNAME),
                                                       [('snapshotId', pymongo.ASCENDING),
                                                        ('timestamp', pymongo.DESCENDING)])
                                        create_indexes(
                                            data['collection'],
                                            config_value(DATABASE, DBNAME),
                                            [
                                                ('_id', pymongo.DESCENDING),
                                                ('timestamp', pymongo.DESCENDING),
                                                ('snapshotId', pymongo.ASCENDING)
                                            ]
                                        )
                                    insert_one_document(data, data['collection'], dbname)
                                else:
                                    # No database: write a file-system snapshot instead.
                                    snapshot_dir = make_snapshots_dir(container)
                                    if snapshot_dir:
                                        store_snapshot(snapshot_dir, data)
                                if 'masterSnapshotId' in node:
                                    snapshot_data[node['snapshotId']] = node['masterSnapshotId']
                                else:
                                    snapshot_data[node['snapshotId']] = True
                            # else:
                            #     snapshot_data[node['snapshotId']] = False
                            node['status'] = 'active'
                        else:
                            node['status'] = 'inactive'
                        logger.debug('Type: %s', type(data))
                    elif 'masterSnapshotId' in node:
                        # Master node: enumerate all matching resources and record
                        # the child snapshot ids/paths for later crawling.
                        alldata = get_all_nodes(repopath, node, snapshot, brnch, sub_data)
                        if alldata:
                            snapshot_data[node['masterSnapshotId']] = []
                            for data in alldata:
                                snapshot_data[node['masterSnapshotId']].append(
                                    {
                                        'snapshotId': data['snapshotId'],
                                        'path': data['path'],
                                        'validate': validate
                                    })
                        logger.debug('Type: %s', type(alldata))
        if baserepo and os.path.exists(baserepo):
            # logger.info('\t\tCLEANING Repo: %s', baserepo)
            shutil.rmtree(baserepo)
    return snapshot_data
def populate_google_snapshot(snapshot, container=None):
    """
    This is an entrypoint for populating a snapshot of type google.

    All snapshot connectors should take a snapshot object and, based on the
    'source' field, create a method to connect to the service for the
    connector. The 'source' field could be used by more than one snapshot,
    so the 'testUser' attribute should match the user of the 'source'.
    """
    dbname = config_value('MONGODB', 'dbname')
    snapshot_source = get_field_value(snapshot, 'source')
    snapshot_user = get_field_value(snapshot, 'testUser')
    project_id = get_field_value(snapshot, 'project-id')
    sub_data = get_google_data(snapshot_source)
    snapshot_nodes = get_field_value(snapshot, 'nodes')
    snapshot_data, valid_snapshotids = validate_snapshot_nodes(snapshot_nodes)
    if valid_snapshotids and sub_data and snapshot_nodes:
        logger.debug(sub_data)
        try:
            for node in snapshot['nodes']:
                validate = node['validate'] if 'validate' in node else True
                logger.info(node)
                node_type = get_field_value_with_default(node, 'type', "")
                # Credentials are resolved per node (node type may change scope).
                credentials = get_google_client_data(sub_data, snapshot_user, node_type, project_id)
                if not credentials:
                    # Without credentials no node can be fetched; give up early.
                    logger.info("No GCE connection in the snapshot to access Google resource!...")
                    return snapshot_data
                if 'snapshotId' in node:
                    if validate:
                        data = get_node(credentials, node, snapshot_source, snapshot)
                        if data:
                            # Pull the error (if any) out of the payload before storing.
                            error_str = data.pop('error', None)
                            if get_dbtests():
                                # First record in this collection: create indexes once.
                                if get_collection_size(data['collection']) == 0:
                                    # Creating indexes for collection
                                    create_indexes(
                                        data['collection'],
                                        config_value(DATABASE, DBNAME),
                                        [
                                            ('snapshotId', pymongo.ASCENDING),
                                            ('timestamp', pymongo.DESCENDING)
                                        ]
                                    )
                                    create_indexes(
                                        data['collection'],
                                        config_value(DATABASE, DBNAME),
                                        [
                                            ('_id', pymongo.DESCENDING),
                                            ('timestamp', pymongo.DESCENDING),
                                            ('snapshotId', pymongo.ASCENDING)
                                        ]
                                    )
                                insert_one_document(data, data['collection'], dbname)
                            else:
                                # No database: write a file-system snapshot instead.
                                snapshot_dir = make_snapshots_dir(container)
                                if snapshot_dir:
                                    store_snapshot(snapshot_dir, data)
                            if 'masterSnapshotId' in node:
                                snapshot_data[node['snapshotId']] = node['masterSnapshotId']
                            else:
                                snapshot_data[node['snapshotId']] = False if error_str else True
                    else:
                        node['status'] = 'inactive'
                elif 'masterSnapshotId' in node:
                    # Master node: crawler populates snapshot_data in place.
                    data = get_all_nodes(credentials, node, snapshot_source, snapshot, snapshot_data)
                    logger.debug('Type: %s', type(data))
        except Exception as ex:
            logger.info('Unable to create Google client: %s', ex)
            raise ex
    return snapshot_data
def populate_azure_snapshot(snapshot, container=None, snapshot_type='azure'):
    """ Populates the resources from azure.

    Resolves client credentials (connector file, then the environment
    variable named after the test user, then the vault), acquires an access
    token, and then fetches each node: nodes with a 'path' are stored as
    individual snapshots (DB or file system), other nodes are treated as
    master snapshots and expanded into child snapshot entries.  Credentials
    stashed in current data are removed before returning.

    Raises:
        Exception: when client id, client secret, or token cannot be obtained.
    """
    dbname = config_value('MONGODB', 'dbname')
    snapshot_source = get_field_value(snapshot, 'source')
    snapshot_user = get_field_value(snapshot, 'testUser')
    snapshot_nodes = get_field_value(snapshot, 'nodes')
    snapshot_data, valid_snapshotids = validate_snapshot_nodes(snapshot_nodes)
    client_id, client_secret, sub_name, sub_id, tenant_id = \
        get_web_client_data(snapshot_type, snapshot_source, snapshot_user)
    if not client_id:
        raise Exception("No client id in the snapshot to access azure resource!...")

    # Read the client secrets from environment variable
    if not client_secret:
        client_secret = os.getenv(snapshot_user, None)
        if client_secret:
            logger.info('Client Secret from environment variable, Secret: %s',
                        '*' * len(client_secret))

    # Read the client secrets from the vault
    if not client_secret:
        client_secret = get_vault_data(client_id)
        if client_secret:
            logger.info('Client Secret from Vault, Secret: %s', '*' * len(client_secret))
        elif get_from_currentdata(CUSTOMER):
            logger.error("Client Secret key does not set in a vault")
            raise Exception("Client Secret key does not set in a vault")

    if not client_secret:
        raise Exception("No `client_secret` key in the connector file to access azure resource!...")

    logger.info('\t\tSubscription: %s', sub_id)
    logger.info('\t\tTenant: %s', tenant_id)
    logger.info('\t\tclient: %s', client_id)
    # Stash the credentials for get_access_token() and downstream calls.
    put_in_currentdata('clientId', client_id)
    put_in_currentdata('clientSecret', client_secret)
    put_in_currentdata('subscriptionId', sub_id)
    put_in_currentdata('tenant_id', tenant_id)
    token = get_access_token()
    logger.debug('TOKEN: %s', token)
    if not token:
        logger.info("Unable to get access token, will not run tests....")
        raise Exception("Unable to get access token, will not run tests....")

    if valid_snapshotids and token and snapshot_nodes:
        for node in snapshot_nodes:
            validate = node['validate'] if 'validate' in node else True
            if 'path' in node:
                # Single-resource node: fetch and store it.
                data = get_node(token, sub_name, sub_id, node, snapshot_user, snapshot_source)
                if data:
                    if validate:
                        if get_dbtests():
                            # First record in this collection: create indexes once.
                            if get_collection_size(data['collection']) == 0:
                                # Creating indexes for collection
                                create_indexes(
                                    data['collection'],
                                    config_value(DATABASE, DBNAME),
                                    [
                                        ('snapshotId', pymongo.ASCENDING),
                                        ('timestamp', pymongo.DESCENDING)
                                    ]
                                )
                                create_indexes(
                                    data['collection'],
                                    config_value(DATABASE, DBNAME),
                                    [
                                        ('_id', pymongo.DESCENDING),
                                        ('timestamp', pymongo.DESCENDING),
                                        ('snapshotId', pymongo.ASCENDING)
                                    ]
                                )
                            insert_one_document(data, data['collection'], dbname, check_keys=False)
                        else:
                            # No database: write a file-system snapshot instead.
                            snapshot_dir = make_snapshots_dir(container)
                            if snapshot_dir:
                                store_snapshot(snapshot_dir, data)
                        if 'masterSnapshotId' in node:
                            snapshot_data[node['snapshotId']] = node['masterSnapshotId']
                        else:
                            snapshot_data[node['snapshotId']] = True
                    node['status'] = 'active'
                else:
                    # TODO alert if notification enabled or summary for inactive.
                    node['status'] = 'inactive'
                logger.debug('Type: %s', type(data))
            else:
                # Master node: enumerate all matching resources; merge entries
                # whose path already exists under another master snapshot.
                alldata = get_all_nodes(
                    token, sub_name, sub_id, node, snapshot_user, snapshot_source)
                if alldata:
                    snapshot_data[node['masterSnapshotId']] = []
                    for data in alldata:
                        found_old_record = False
                        for masterSnapshotId, snapshot_list in snapshot_data.items():
                            old_record = None
                            if isinstance(snapshot_list, list):
                                for item in snapshot_list:
                                    if item["path"] == data['path']:
                                        old_record = item
                                if old_record:
                                    found_old_record = True
                                    if node['masterSnapshotId'] not in old_record['masterSnapshotId']:
                                        old_record['masterSnapshotId'].append(
                                            node['masterSnapshotId'])
                        if not found_old_record:
                            snapshot_data[node['masterSnapshotId']].append(
                                {
                                    'masterSnapshotId': [node['masterSnapshotId']],
                                    'snapshotId': data['snapshotId'],
                                    'path': data['path'],
                                    'validate': validate,
                                    'status': 'active'
                                })
                logger.debug('Type: %s', type(alldata))
    # Scrub credentials and transient state from current data.
    delete_from_currentdata('resources')
    delete_from_currentdata('clientId')
    # FIX: the secret was stored above under 'clientSecret', but only
    # 'client_secret' was being deleted, leaving the secret in current data.
    # Delete both keys so neither spelling survives.
    delete_from_currentdata('clientSecret')
    delete_from_currentdata('client_secret')
    delete_from_currentdata('subscriptionId')
    delete_from_currentdata('tenant_id')
    delete_from_currentdata('token')
    return snapshot_data
def populate_aws_snapshot(snapshot, container=None):
    """
    This is an entrypoint for populating a snapshot of type aws.

    All snapshot connectors should take a snapshot object and, based on the
    'source' field, create a method to connect to the service for the
    connector. The 'source' field could be used by more than one snapshot,
    so the 'testUser' attribute should match the user of the 'source'.

    Nodes with a 'snapshotId' are fetched with a single boto3 client and
    stored; nodes with a 'masterSnapshotId' are expanded across all
    applicable regions into child snapshot entries.

    Raises:
        Exception: when the access key or secret cannot be resolved.
    """
    dbname = config_value('MONGODB', 'dbname')
    snapshot_source = get_field_value(snapshot, 'source')
    snapshot_user = get_field_value(snapshot, 'testUser')
    account_id = get_field_value(snapshot, 'accountId')
    sub_data = get_aws_data(snapshot_source)
    snapshot_nodes = get_field_value(snapshot, 'nodes')
    snapshot_data, valid_snapshotids = validate_snapshot_nodes(snapshot_nodes)
    if valid_snapshotids and sub_data and snapshot_nodes:
        logger.debug(sub_data)
        access_key, secret_access, region, connector_client_str = \
            get_aws_client_data(sub_data, snapshot_user, account_id)
        if not access_key:
            logger.info(
                "No access_key in the snapshot to access aws resource!...")
            raise Exception(
                "No access_key in the snapshot to access aws resource!...")

        # Read the client secrets from the vault
        if not secret_access:
            secret_access = get_vault_data(access_key)
            if secret_access:
                logger.info('Vault Secret: %s', '*' * len(secret_access))
            else:
                logger.info("Secret Access key does not set in a vault")
                raise Exception("Secret Access key does not set in a vault")
        if not secret_access:
            logger.info(
                "No secret_access in the snapshot to access aws resource!...")
            return snapshot_data
        if access_key and secret_access:
            for node in snapshot['nodes']:
                mastercode = False
                if 'snapshotId' in node:
                    # Single-resource node: one client for its service/region.
                    client_str, aws_region = _get_aws_client_data_from_node(
                        node, default_client=connector_client_str,
                        default_region=region)
                    if not _validate_client_name(client_str):
                        logger.error("Invalid Client Name")
                        return snapshot_data
                    try:
                        awsclient = client(client_str.lower(),
                                           aws_access_key_id=access_key,
                                           aws_secret_access_key=secret_access,
                                           region_name=aws_region)
                    except Exception as ex:
                        logger.info('Unable to create AWS client: %s', ex)
                        awsclient = None
                    logger.info(awsclient)
                    if awsclient:
                        data = get_node(awsclient, node, snapshot_source)
                        if data:
                            # Pull the error (if any) out of the payload before storing.
                            error_str = data.pop('error', None)
                            if get_dbtests():
                                # First record in this collection: create indexes once.
                                if get_collection_size(data['collection']) == 0:
                                    # Creating indexes for collection
                                    create_indexes(
                                        data['collection'],
                                        config_value(DATABASE, DBNAME),
                                        [('snapshotId', pymongo.ASCENDING),
                                         ('timestamp', pymongo.DESCENDING)])
                                check_key = is_check_keys_required(data)
                                insert_one_document(data, data['collection'], dbname, check_key)
                            else:
                                # No database: write a file-system snapshot instead.
                                snapshot_dir = make_snapshots_dir(container)
                                if snapshot_dir:
                                    store_snapshot(snapshot_dir, data)
                            if 'masterSnapshotId' in node:
                                snapshot_data[node['snapshotId']] = node['masterSnapshotId']
                            else:
                                snapshot_data[node['snapshotId']] = False if error_str else True
                elif 'masterSnapshotId' in node:
                    # Master node: expand across all applicable regions.
                    mastercode = True
                    client_str, aws_region = _get_aws_client_data_from_node(
                        node, default_client=connector_client_str,
                        default_region=region)
                    if not _validate_client_name(client_str):
                        logger.error("Invalid Client Name")
                        return snapshot_data
                    if aws_region:
                        all_regions = [aws_region]
                    else:
                        all_regions = Session().get_available_regions(
                            client_str.lower())
                        # Global-style services only need a single region query.
                        if client_str.lower() in ['s3', 'cloudtrail']:
                            all_regions = ['us-west-1']
                    logger.info("Length of all regions is %s" % (str(len(all_regions))))
                    count = 0
                    snapshot_data[node['masterSnapshotId']] = []
                    for each_region in all_regions:
                        logger.info(each_region)
                        try:
                            awsclient = client(
                                client_str.lower(),
                                aws_access_key_id=access_key,
                                aws_secret_access_key=secret_access,
                                region_name=each_region)
                        except Exception as ex:
                            logger.info('Unable to create AWS client: %s', ex)
                            # FIX: reset the client on failure; previously the
                            # stale client from the prior region was silently
                            # reused for this region's crawl.
                            awsclient = None
                        logger.info(awsclient)
                        if awsclient:
                            all_data = get_all_nodes(awsclient, node, snapshot, sub_data)
                            if all_data:
                                for data in all_data:
                                    snapshot_data[
                                        node['masterSnapshotId']].append({
                                            'snapshotId': '%s%s' % (node['masterSnapshotId'], str(count)),
                                            'validate': True,
                                            'detailMethods': data['detailMethods'],
                                            'structure': 'aws',
                                            'masterSnapshotId': node['masterSnapshotId'],
                                            'collection': data['collection'],
                                            'arn': data['arn']
                                        })
                                    count += 1
            if mastercode:
                # Master crawls can produce overlapping entries; dedupe once.
                snapshot_data = eliminate_duplicate_snapshots(snapshot_data)
    return snapshot_data
def populate_arm_snapshot(container, dbname, snapshot_source, sub_data, snapshot_data, node, repopath):
    """ Populate snapshot by running arm command.

    Reads the node's 'paths' list, classifies each json file by its $schema
    (deployment template vs deployment parameters), runs `az deployment
    validate` on the pair, and stores the resulting record in the database
    or on the file system.  Marks the node active/inactive accordingly and
    returns the updated snapshot_data mapping.
    """
    dir_path = get_field_value(sub_data, 'folderPath')
    if not dir_path:
        dir_path = repopath
    location = get_field_value(node, 'location')
    paths = get_field_value(node, 'paths')
    template_file_path = ""
    deployment_file_path = ""
    if paths and isinstance(paths, list):
        if not location:
            logger.error("Invalid json : 'location' field is required in node")
            node['status'] = 'inactive'
            return snapshot_data
        for json_file in paths:
            json_file_path = '%s/%s.json' % (dir_path, json_file)
            json_data = json_from_file(json_file_path)
            if not json_data:
                logger.error("Invalid path or json")
                node['status'] = 'inactive'
                return snapshot_data
            elif "$schema" not in json_data:
                logger.error(
                    "Invalid json : does not contains '$schema' field in json."
                )
                node['status'] = 'inactive'
                return snapshot_data
            else:
                # Classify the file by the trailing segment of its $schema URL.
                if "deploymentTemplate.json" in json_data['$schema'].split(
                        "/")[-1]:
                    template_file_path = json_file_path
                elif "deploymentParameters.json" in json_data['$schema'].split(
                        "/")[-1]:
                    deployment_file_path = json_file_path
                else:
                    logger.error(
                        "Invalid json : $schema does not contains the correct value"
                    )
        if template_file_path and deployment_file_path:
            response = invoke_az_cli("deployment validate --location " +
                                     location + " --template-file " +
                                     template_file_path + " --parameters @" +
                                     deployment_file_path)
            data_record = create_database_record(node, snapshot_source,
                                                 response, sub_data)
            if get_dbtests():
                # First record in this collection: create indexes once.
                if get_collection_size(node['collection']) == 0:
                    # Creating indexes for collection
                    create_indexes(node['collection'],
                                   config_value(DATABASE, DBNAME),
                                   [('snapshotId', pymongo.ASCENDING),
                                    ('timestamp', pymongo.DESCENDING)])
                insert_one_document(data_record, node['collection'], dbname)
            else:
                # No database: write a file-system snapshot instead.
                snapshot_dir = make_snapshots_dir(container)
                if snapshot_dir:
                    store_snapshot(snapshot_dir, data_record)
            # FIX: use .get() — a record without an 'error' key previously
            # raised KeyError here; sibling code guards the same lookup with
            # ('error' in data_record and data_record['error']).
            snapshot_data[
                node['snapshotId']] = False if data_record.get('error') else True
            node['status'] = 'active'
        else:
            node['status'] = 'inactive'
    else:
        node['status'] = 'inactive'
        logger.error(
            "Invalid json : `paths` field is missing for 'arm' node type or it is not a list"
        )
    return snapshot_data
def populate_custom_snapshot(snapshot, container=None):
    """ Populates the resources from git.

    NOTE(review): this is the SECOND definition of populate_custom_snapshot
    in this file; it overrides the earlier one at import time — confirm the
    duplicate is intentional.  Unlike the earlier definition, node_type
    defaults to 'json' here and 'arm' nodes are dispatched to the arm
    populate helpers.
    """
    dbname = config_value('MONGODB', 'dbname')
    snapshot_source = get_field_value(snapshot, 'source')
    sub_data = get_custom_data(snapshot_source)
    snapshot_nodes = get_field_value(snapshot, 'nodes')
    snapshot_data, valid_snapshotids = validate_snapshot_nodes(snapshot_nodes)
    if valid_snapshotids and sub_data and snapshot_nodes:
        baserepo, repopath = _get_repo_path(sub_data, snapshot)
        if repopath:
            brnch = get_field_value_with_default(sub_data, 'branchName', 'master')
            for node in snapshot_nodes:
                node_type = node[
                    'type'] if 'type' in node and node['type'] else 'json'
                if node_type == 'arm':
                    # ARM template nodes get their own populate helpers.
                    if 'snapshotId' in node:
                        populate_arm_snapshot(container, dbname, snapshot_source, sub_data, snapshot_data, node, repopath)
                    elif 'masterSnapshotId' in node:
                        populate_all_arm_snapshot(snapshot, dbname, sub_data, node, repopath, snapshot_data)
                else:
                    # logger.debug(node)
                    # data = get_node(repopath, node, snapshot_source, brnch)
                    # if data:
                    #     insert_one_document(data, data['collection'], dbname)
                    # snapshot_data[node['snapshotId']] = True
                    validate = node['validate'] if 'validate' in node else True
                    if 'snapshotId' in node:
                        logger.debug(node)
                        data = get_node(repopath, node, snapshot, brnch, sub_data)
                        if data:
                            if validate:
                                if get_dbtests():
                                    # First record in this collection: create indexes once.
                                    if get_collection_size(
                                            data['collection']) == 0:
                                        # Creating indexes for collection
                                        create_indexes(
                                            data['collection'],
                                            config_value(DATABASE, DBNAME),
                                            [('snapshotId', pymongo.ASCENDING),
                                             ('timestamp', pymongo.DESCENDING)
                                             ])
                                    insert_one_document(
                                        data, data['collection'], dbname)
                                else:
                                    # No database: write a file-system snapshot instead.
                                    snapshot_dir = make_snapshots_dir(
                                        container)
                                    if snapshot_dir:
                                        store_snapshot(snapshot_dir, data)
                                if 'masterSnapshotId' in node:
                                    snapshot_data[node['snapshotId']] = node[
                                        'masterSnapshotId']
                                else:
                                    snapshot_data[node['snapshotId']] = True
                            else:
                                snapshot_data[node['snapshotId']] = False
                            node['status'] = 'active'
                        else:
                            node['status'] = 'inactive'
                        logger.debug('Type: %s', type(data))
                    elif 'masterSnapshotId' in node:
                        # Master node: enumerate resources into child entries.
                        alldata = get_all_nodes(repopath, node, snapshot, brnch, sub_data)
                        if alldata:
                            snapshot_data[node['masterSnapshotId']] = []
                            for data in alldata:
                                snapshot_data[node['masterSnapshotId']].append(
                                    {
                                        'snapshotId': data['snapshotId'],
                                        'path': data['path'],
                                        'validate': True
                                    })
                        logger.debug('Type: %s', type(alldata))
        if baserepo and os.path.exists(baserepo):
            logger.info('Repo path: %s', baserepo)
            shutil.rmtree(baserepo)
    return snapshot_data