def _delete_ckpt(self, config_name, stanza_id): if config_name == 'aws_cloudtrail': try: from splunk_ta_aws.modinputs.cloudtrail import delete_ckpt delete_ckpt(stanza_id) except Exception as exc: if (isinstance(exc, IOError) and 'No such file or directory' in str(exc)): return RestError(500, 'Failed to delete checkpoint') elif config_name == 'aws_s3': try: from splunk_ta_aws.modinputs.generic_s3 import delete_ckpt delete_ckpt(stanza_id) except Exception as exc: if (isinstance(exc, IOError) and 'No such file or directory' in str(exc)): return RestError(500, 'Failed to delete checkpoint') elif config_name == 'splunk_ta_aws_logs': try: from splunk_ta_aws.modinputs.incremental_s3 import delete_data_input delete_data_input(stanza_id) except Exception as exc: if (isinstance(exc, IOError) and 'No such file or directory' in str(exc)): return RestError(500, 'Failed to delete checkpoint') return True
def handle(*args, **kwargs):
    """Invoke the wrapped migration function, converting any failure into a
    RestError(500) that carries the full traceback text.

    NOTE(review): narrowed the original bare ``except:`` to
    ``except Exception:`` so SystemExit/KeyboardInterrupt raised during a
    migration are no longer swallowed and re-labelled as REST errors.
    """
    try:
        return func(*args, **kwargs)
    except Exception:
        raise RestError(
            500, 'Migrating failed. %s' % traceback.format_exc()
        )
def handleList(self, confInfo):
    """List one account (when an id is supplied) or all accounts, with
    credential fields stripped via ``skip_cred`` before being emitted."""
    try:
        requested = self.callerArgs.id
        if requested is not None:
            entity = self.skip_cred(self.get(requested))
            self.makeConfItem(requested, entity, confInfo)
        else:
            for acc_name, acc_ent in self.all().items():
                self.makeConfItem(acc_name, self.skip_cred(acc_ent), confInfo)
    except Exception as exc:
        # Any backend failure surfaces as a 400 to the REST caller.
        raise RestError(400, exc)
def handleCreate(self, confInfo):
    """Create inputs.

    Rejects the request with a 400 when an input with the same name
    already exists; otherwise builds the input dict from the request
    and persists it.
    """
    name = self.callerArgs.id
    existing, _ = self._get_inputs_by_name(name)
    if existing:
        raise RestError(
            400, ' Name "%s" is already in use' % name
        )
    self._create_inputs(self._get_inputs_from_req(self.callerArgs))
def handleList(self, confInfo):
    """Emit the S3 bucket names reachable with the selected account/role,
    together with the endpoint host derived from the account's category."""
    aws_account = None
    aws_account_category = tac.RegionCategory.COMMERCIAL
    if self.callerArgs['aws_account'] is not None:
        aws_account = self.callerArgs['aws_account'][0]
    aws_iam_role = None
    if self.callerArgs.get('aws_iam_role') is not None:
        aws_iam_role = self.callerArgs['aws_iam_role'][0]

    if not aws_account:
        # No account chosen yet: answer with an empty bucket list.
        confInfo['bucket_name'].append('bucket_name', [])
        return

    # Set proxy for boto3
    tacommon.set_proxy_env(pc.get_proxy_info(self.getSessionKey()))

    cred_service = tacommon.create_credentials_service(
        get_splunkd_uri(), self.getSessionKey())
    try:
        cred = cred_service.load(aws_account, aws_iam_role)
        aws_account_category = cred.category
    except ClientError as err:
        # NOTE(review): ``err.message`` is Python 2 only — confirm runtime.
        raise RestError(400, str(err.message) + '. Please make sure the AWS Account and Assume Role are correct.')

    host_name = tac.CATEGORY_HOST_NAME_MAP[aws_account_category]
    connection = connect_s3(
        cred.aws_access_key_id,
        cred.aws_secret_access_key,
        self.getSessionKey(),
        host_name,
        security_token=cred.aws_session_token,
    )

    # Bound the listing to 25 seconds; presumably falls back to the empty
    # default when the call does not finish in time — verify against timed().
    buckets = timed(25, all_buckets, [], (connection,))
    names = [b.name for b in buckets]
    for bucket_name in names:
        confInfo[bucket_name].append('bucket_name', bucket_name)
        confInfo[bucket_name].append('host_name', host_name)
def _list_rules(self, conf_info):
    """Populate conf_info with every AWS Config rule name visible to the
    selected account/role in the requested region (paginated fetch)."""
    args = self.callerArgs.data
    aws_account = args['aws_account'][0]
    aws_iam_role = args.get('aws_iam_role', [None])[0]
    region_name = args['aws_region'][0]

    scheme, host, port = get_splunkd_access_info()
    service = Service(scheme=scheme, host=host, port=port,
                      token=self.getSessionKey())
    credentials_cache = AWSCredentialsCache(
        AWSCredentialsProviderFactory(ConfigManager(service)).create(
            aws_account, aws_iam_role))
    client = credentials_cache.client('config', region_name)

    rule_names = []
    next_token = ""
    while True:
        try:
            response = client.describe_config_rules(NextToken=next_token)
        except Exception as e:
            logger.error('Failed to describe config rules')
            # NOTE(review): ``e.message`` is Python 2 only — confirm runtime.
            msg = str(e.message)
            logger.error(msg)
            raise RestError(400, 'Failed to describe config rules: ' + msg)
        if not tacommon.is_http_ok(response):
            # Non-OK HTTP status: log and bail out without populating anything.
            logger.error("Failed to describe config rules, errorcode=%s",
                         tacommon.http_code(response))
            return
        page = response.get("ConfigRules")
        if not page:
            break
        rule_names.extend(item["ConfigRuleName"] for item in page)
        next_token = response.get("NextToken")
        if not next_token:
            break

    for name in rule_names:
        conf_info[name].append("rule_names", name)
def _delete_ckpt(self, config_name, stanza_id): if config_name == 'aws_cloudtrail': from splunk_ta_aws.modinputs.cloudtrail import delete_ckpt elif config_name == 'aws_s3': from splunk_ta_aws.modinputs.generic_s3 import delete_ckpt elif config_name == 'splunk_ta_aws_logs': from splunk_ta_aws.modinputs.incremental_s3 import delete_data_input as delete_ckpt elif config_name == 'aws_billing_cur': from splunk_ta_aws.modinputs.billing import delete_ckpt as delete_ckpt try: delete_ckpt(stanza_id) except NameError: return False except Exception as exc: if (isinstance(exc, IOError) and 'No such file or directory' in str(exc)): return RestError(500, 'Failed to delete checkpoint for input %s' % config_name) return True
def separate_inputs(name, origin_input, fields):
    """Separate group input into stanzas.

    If there are multiple values in `fields`, then they will be separated
    into multiple inputs. All other fields will remain the same. In some
    cases, for example, the user clicks "disable" in UI, only
    `{disabled: 1}` will be sent and there is no group fields.

    Args:
        name (string): The user-input name
        origin_input (dict): The original input posted by the frontend
        fields (list): Fields that will be separated.

    Returns:
        dict: Dict of separated inputs.

    Raises:
        RestError: 400 when a group field is not valid JSON, when group
            fields have mismatched lengths, when a non-ALLOW_EMPTY field
            contains an empty value, or when two generated inputs are
            identical.
    """
    group_count = -1
    for f in fields:
        # In case of `{disabled: 1}`, there is no group fields.
        if f not in origin_input:
            continue
        try:
            origin_input[f] = json.loads(origin_input[f])
        except Exception:
            # Fix: narrowed from a bare `except:` so KeyboardInterrupt /
            # SystemExit are no longer converted into a 400; malformed
            # JSON (and a non-string value) still maps to a 400.
            raise RestError(
                400, origin_input[f]
            )
        # Make sure every fields in group input have the same number of items.
        if group_count == -1:
            group_count = len(origin_input[f])
        elif group_count != len(origin_input[f]):
            logger.error('Group input fields does not match')
            raise RestError(
                400, 'Group input fields does not match'
            )
        # Check if there is any empty value. Any empty field will be an
        # empty string (not None).
        if f in ALLOW_EMPTY:
            continue
        if '' in origin_input[f]:
            logger.error('Field %s cannot be empty' % f)
            raise RestError(
                400, 'Field %s cannot be empty' % f
            )

    inputs_dict = dict()
    inputs_set = set()
    for i in range(group_count):
        separated_input = dict()
        group_fields = dict()
        for k in origin_input:
            if k in fields:
                separated_input[k] = origin_input[k][i]
                group_fields[k] = origin_input[k][i]
            else:
                separated_input[k] = origin_input[k]
        # Reject two stanzas whose group fields are identical.
        input_str = str(group_fields)
        if input_str in inputs_set:
            raise RestError(
                400, 'Duplicated input %s' % input_str
            )
        inputs_set.add(input_str)
        # NOTE(review): presumably _build_name returns a unique stanza name
        # on every call, otherwise entries would overwrite each other —
        # verify against its definition.
        inputs_dict[_build_name(name)] = separated_input

    if len(inputs_dict) > 0:
        logger.info('%d inputs are generated.' % (len(inputs_dict)))
    else:
        # No group fields at all (e.g. plain `{disabled: 1}` request):
        # wrap the original input so callers always get a dict of inputs.
        inputs_dict = {
            'unknown': origin_input
        }
        logger.info('No input is generated based on group fields. '
                    'Wrap the original '
                    'one as an special input %s to keep the output consistent'
                    % str(inputs_dict))
    return inputs_dict
# NOTE(review): this span is a collapsed fragment mixing several definitions:
#   1. The interior of a `timed(...)`-style helper — the __init__/run methods
#      of a FuncThread (threading.Thread) plus the start/join/raise driver
#      code. The enclosing function that supplies `func`, `args`, `kwargs`,
#      `default`, `timeout` and the FuncThread class header is NOT visible
#      here, so these definitions are incomplete from this view.
#   2. A complete helper `all_buckets(s3_conn)` returning
#      s3_conn.get_all_buckets().
#   3. The opening of `class ConfigHandler` with its `setup` (declares the
#      required 'aws_account' and optional 'aws_iam_role' args) and the
#      truncated first line of `handleList`, which continues beyond this span.
# The `except Exception, exc` syntax is Python 2 only.
# Left byte-identical: the surrounding definitions are incomplete, so any
# reformatting here would have to guess at missing context.
def __init__(self): threading.Thread.__init__(self) self.result = default self.error = None def run(self): try: self.result = func(*args, **kwargs) except Exception, exc: self.error = exc it = FuncThread() it.start() it.join(timeout) if it.error: raise RestError(400, it.error) return it.result def all_buckets(s3_conn): return s3_conn.get_all_buckets() class ConfigHandler(splunk.admin.MConfigHandler): def setup(self): self.supportedArgs.addReqArg('aws_account') self.supportedArgs.addOptArg('aws_iam_role') def handleList(self, confInfo): aws_account = None
def handleList(self, confInfo):
    """Emit the AWS regions available for the requested service, filtered to
    the region category (commercial / GovCloud / China) of the selected
    account when one is supplied."""
    service = self.callerArgs.data['aws_service'][0]

    # Resolve the account's category from whichever account arg was passed.
    account_category = None
    for account_arg in ACCOUNT_OPT_ARGS:
        if account_arg in self.callerArgs.data:
            account_name = self.callerArgs.data[account_arg][0]
            account = tacommon.get_account(
                get_splunkd_uri(), self.getSessionKey(), account_name)
            account_category = account.category
            break

    if service == 'aws_cloudwatch':
        import boto.ec2.cloudwatch
        regions = boto.ec2.cloudwatch.regions()
    elif service == 'aws_cloudtrail':
        import boto.cloudtrail
        regions = boto.cloudtrail.regions()
    elif service == 'aws_config':
        import boto.sqs
        regions = boto.sqs.regions()
    elif service == 'aws_config_rule':
        import boto.configservice
        # FIXME, hard code for now
        regions = DummyRegion.from_names([
            'us-east-1', 'us-east-2', 'us-west-1', 'us-west-2',
            'ap-southeast-1', 'ap-southeast-2', 'ap-northeast-1',
            'ap-northeast-2', 'eu-central-1', 'eu-west-1',
        ])
    elif service == 'aws_cloudwatch_logs':
        import boto.logs
        regions = boto.logs.regions()
    elif service == 'aws_description':
        import boto.ec2
        regions = boto.ec2.regions()
    elif service == 'aws_inspector':
        regions = DummyRegion.from_names([
            'us-east-1', 'us-west-2', 'ap-northeast-2', 'ap-south-1',
            'ap-southeast-2', 'ap-northeast-1', 'eu-west-1',
        ])
    elif service == 'aws_kinesis':
        regions = DummyRegion.from_names(
            DUMMY_BOTO3_SESSION.get_available_regions('kinesis', 'aws')
            + DUMMY_BOTO3_SESSION.get_available_regions('kinesis', 'aws-us-gov')
            + DUMMY_BOTO3_SESSION.get_available_regions('kinesis', 'aws-cn')
        )
    elif service in ('aws_sqs_based_s3', 'splunk_ta_aws_sqs'):
        import boto.sqs
        regions = boto.sqs.regions()
    elif service == 'aws_s3':
        import boto.s3
        regions = boto.s3.regions()
    else:
        msg = "Unsupported aws_service={} specified.".format(service)
        raise RestError(400, msg)

    descriptions = _load_description_of_regions()
    for region in regions:
        if 'us-gov-' in region.name:
            region_category = tac.RegionCategory.USGOV
        elif 'cn-' in region.name:
            region_category = tac.RegionCategory.CHINA
        else:
            region_category = tac.RegionCategory.COMMERCIAL
        if account_category is None or region_category == account_category:
            confInfo[region.name].append(
                'label', descriptions[region.name]['description'])

    if len(confInfo) == 0:
        raise RestError(400, 'This service is not available for your AWS account.')
def handleList(self, conf_info):
    """
    Handling the input request and populating the different security policies
    :param conf_info: The conf_info is used to pass the data from python handler to the javascript
    :return:
    """
    session_key = self.getSessionKey()
    log_level = get_log_level(session_key, logger)
    logger.setLevel(log_level)

    # Repeated message literals hoisted into locals; text is unchanged.
    fetch_err = 'Error while fetching the site id for site={}, status code={} response={}'
    fetch_ok = 'Successfully fetch the site code for site={}'
    user_msg = ("Error while fetching the sites for the account."
                " Please enter the site names manually")

    try:
        account_name = self.callerArgs.data["account_name"][0]
        account_details = get_account_details(session_key, account_name, logger)
        proxy_settings = get_proxy_settings(session_key, logger)

        client_id = account_details.get('client_id')
        client_secret = account_details.get('client_secret')
        access_token = account_details.get('access_token')
        refresh_token = account_details.get('refresh_token')
        site_name = '*'
        site_id = []
        lansweeper = Lansweeper(client_id=client_id,
                                client_secret=client_secret,
                                access_token=access_token,
                                refresh_token=refresh_token,
                                proxy_settings=proxy_settings,
                                logger=logger)
        try:
            status_code, response = lansweeper.get_site_id(site_name)
            if status_code == 200:
                logger.info(fetch_ok.format(site_name))
                site_id = response
            else:
                # Non-200: the token may simply have expired — try a refresh.
                refreshed = lansweeper.is_token_expired(status_code,
                                                        response.text)
                if not refreshed:
                    logger.error(fetch_err.format(site_name, status_code,
                                                  response))
                    raise RestError(409, user_msg)
                lansweeper.access_token = refreshed['access_token']
                lansweeper.refresh_token = refreshed['refresh_token']
                # Updating the access token and refresh token in the conf files
                try:
                    update_access_token(
                        access_token=refreshed['access_token'],
                        refresh_token=refreshed['refresh_token'],
                        client_secret=client_secret,
                        session_key=session_key,
                        stanza_name=account_name)
                    logger.info(
                        'Successfully updated the new access token and refresh token in the conf file'
                    )
                except Exception as exception:
                    # Best-effort persistence: a failed conf write must not
                    # abort the retry below.
                    logger.warning(
                        'Error while updating the access token and refresh token in the conf file, error={}'
                        .format(exception))
                # Retry once with the refreshed credentials.
                status_code, response = lansweeper.get_site_id(site_name)
                if status_code == 200:
                    logger.info(fetch_ok.format(site_name))
                    site_id = response
                else:
                    logger.error(fetch_err.format(site_name, status_code,
                                                  response))
                    raise RestError(409, user_msg)
        except Exception:
            # Also re-wraps the RestErrors raised above into a fresh one
            # with the same message, matching the original flow.
            logger.exception(
                'Error while fetching the site id for site={}'.format(
                    site_name))
            raise RestError(409, user_msg)

        sites = []
        for profile in site_id:
            conf_info[profile['site']['name']].append(
                'value', profile['site']['id'])
        # NOTE(review): `sites` is never populated, so this always logs [].
        logger.info('Returning sites={}'.format(sites))
    except Exception as e:
        logger.exception(
            "Error while getting the sites, error={}".format(e))
        raise RestError(409, user_msg)