def _migrate(self):
    """Migrate legacy conf-file configuration for the settings/account endpoints.

    Does nothing for any other endpoint path.
    """
    endpoint_path = self.endpoint.internal_endpoint
    # Only the 'settings' and 'account' endpoints carry legacy configuration.
    if not endpoint_path.endswith(('settings', 'account')):
        return

    parsed = urlparse(get_splunkd_uri())
    self.base_app_name = util.get_base_app_name()
    connection_args = dict(
        scheme=parsed.scheme,
        host=parsed.hostname,
        port=parsed.port,
    )
    self.conf_mgr = ConfManager(
        self.getSessionKey(),
        self.base_app_name,
        **connection_args
    )
    self.client = SplunkRestClient(
        self.getSessionKey(),
        self.base_app_name,
        **connection_args
    )
    self.legacy_passwords = None

    # Migrate legacy configuration living in the related conf files.
    if endpoint_path.endswith('settings'):
        self._migrate_conf()
        self._migrate_conf_customized()
    else:
        # Guaranteed to end with 'account' by the guard clause above.
        self._migrate_conf_credential()
def _get_region_host(self, session_key, payload):
    """Resolve the AWS region and S3 endpoint host for the requested bucket.

    :param session_key: Splunk session key used to load proxy info and
        AWS credentials.
    :param payload: request payload dict carrying account, IAM role and
        bucket-name fields.
    :returns: ``(region, host_name)`` tuple.
    """
    config = pc.get_proxy_info(session_key)
    tacommon.set_proxy_env(config)
    credentials_service = tacommon.create_credentials_service(
        get_splunkd_uri(), session_key)
    credentials = credentials_service.load(
        payload[tac.aws_account],
        payload[tac.aws_iam_role],
    )
    config[tac.key_id] = credentials.aws_access_key_id
    config[tac.secret_key] = credentials.aws_secret_access_key
    config['aws_session_token'] = credentials.aws_session_token
    config[asc.bucket_name] = payload[asc.bucket_name]
    config[asc.host_name] = tac.CATEGORY_HOST_NAME_MAP[
        credentials.category]

    if config[asc.host_name] == asc.default_host:
        # Default host: derive the region from the bucket and look the
        # concrete endpoint up in the bundled endpoints data file.
        region = get_region_for_bucketname(config)
        with open(ENDPOINTS_PATH, 'r') as endpoints_file:
            endpoints = json.load(endpoints_file)
        host_name = EndpointResolver(endpoints).construct_endpoint(
            's3', region).get('hostname', asc.default_host)
    else:
        # Parse the region out of hosts like s3.eu-west-1.amazonaws.com.
        # BUGFIX: the final dot must be escaped — the old pattern
        # r'...amazonaws.com' let any character match before 'com'.
        pattern = r's3[.-]([\w-]+)\.amazonaws\.com'
        m = re.search(pattern, config[asc.host_name])
        # Fall back to us-east-1 when the host does not match the pattern.
        region = m.group(1) if m else 'us-east-1'
        host_name = config[asc.host_name]
    return (region, host_name)
def get_splunkd_endpoint():
    """Return the splunkd URI, memoizing it in the SPLUNKD_URI env var."""
    cached = os.environ.get("SPLUNKD_URI")
    if cached:
        return cached
    uri = get_splunkd_uri()
    # Cache the resolved URI so subsequent calls (and child processes
    # inheriting the environment) skip the lookup.
    os.environ["SPLUNKD_URI"] = uri
    return uri
def get_splunkd_endpoint():
    '''Look up the splunkd URI, caching it in the process environment.'''
    uri = os.environ.get('SPLUNKD_URI')
    if not uri:
        # First call (or empty value): resolve and remember the URI.
        uri = get_splunkd_uri()
        os.environ['SPLUNKD_URI'] = uri
    return uri
def test_splunkd_uri(monkeypatch):
    """get_splunkd_uri honors SPLUNK_BINDIP and an explicit SPLUNKD_URI."""
    common.mock_splunkhome(monkeypatch)
    # Default: loopback address with the mocked management port.
    assert splunkenv.get_splunkd_uri() == "https://127.0.0.1:8089"

    # SPLUNK_BINDIP supplies the address; any port suffix in it is ignored.
    for bindip, expected in (
        ("10.0.0.2:7080", "https://10.0.0.2:8089"),
        ("10.0.0.3", "https://10.0.0.3:8089"),
    ):
        monkeypatch.setenv("SPLUNK_BINDIP", bindip)
        assert splunkenv.get_splunkd_uri() == expected

    # An explicit SPLUNKD_URI wins over everything else.
    monkeypatch.setenv("SPLUNKD_URI", "https://10.0.0.1:8089")
    assert splunkenv.get_splunkd_uri() == "https://10.0.0.1:8089"
def __init__(self, *args, **kwargs):
    """Set up the REST handler and pre-convert the request payload."""
    # Explicit base-class call (classic style) keeps compatibility with
    # older versions of the private Splunk SDK.
    admin.MConfigHandler.__init__(self, *args, **kwargs)
    session_key = self.getSessionKey()
    self.handler = RestHandler(get_splunkd_uri(), session_key, self.endpoint)
    self.payload = self._convert_payload()
def handleList(self, conf_info):
    """List the Kinesis streams for the account/region in callerArgs."""
    logger.info("start listing kinesis streams")

    # Reject the request early when any mandatory parameter is absent.
    for param in self.valid_params:
        if not self.callerArgs or not self.callerArgs.get(param):
            logger.error('Missing "%s"', param)
            raise Exception('Missing "{}"'.format(param))

    account_arg = self.callerArgs[tac.account]
    aws_account = account_arg[0] if account_arg is not None else ""
    role_arg = self.callerArgs.get(tac.aws_iam_role)
    aws_iam_role = role_arg[0] if role_arg is not None else None

    session_key = self.getSessionKey()

    # Set proxy for boto3 before any AWS call is made.
    proxy = pc.get_proxy_info(session_key)
    tacommon.set_proxy_env(proxy)

    cred_service = tacommon.create_credentials_service(
        get_splunkd_uri(), session_key)
    cred = cred_service.load(aws_account, aws_iam_role)

    proxy[tac.server_uri] = get_splunkd_uri()
    proxy[tac.session_key] = session_key
    proxy[tac.aws_account] = aws_account
    proxy[tac.aws_iam_role] = aws_iam_role
    proxy[tac.region] = self.callerArgs[tac.region][0]
    proxy[tac.key_id] = cred.aws_access_key_id
    proxy[tac.secret_key] = cred.aws_secret_access_key
    proxy['aws_session_token'] = cred.aws_session_token

    client = akc.KinesisClient(proxy, logger)
    for stream in client.list_streams():
        conf_info[stream].append("stream_names", stream)
    logger.info("end of listing kinesis streams")
def handleList(self, confInfo):
    """List S3 buckets reachable with the configured account / assume role.

    Emits one entry per bucket with its name and the category-specific
    S3 host name; emits a single empty ``bucket_name`` entry when no
    account is selected.

    :raises RestError: 400 when the credentials cannot be loaded.
    """
    aws_account = None
    aws_account_category = tac.RegionCategory.COMMERCIAL
    if self.callerArgs['aws_account'] is not None:
        aws_account = self.callerArgs['aws_account'][0]
    aws_iam_role = None
    if self.callerArgs.get('aws_iam_role') is not None:
        aws_iam_role = self.callerArgs['aws_iam_role'][0]

    # No account selected yet: return an empty bucket list, not an error.
    if not aws_account:
        confInfo['bucket_name'].append('bucket_name', [])
        return

    # Set proxy for boto3
    proxy = pc.get_proxy_info(self.getSessionKey())
    tacommon.set_proxy_env(proxy)

    cred_service = tacommon.create_credentials_service(
        get_splunkd_uri(), self.getSessionKey())
    try:
        cred = cred_service.load(aws_account, aws_iam_role)
        aws_account_category = cred.category
    except ClientError as err:
        # BUGFIX: exceptions have no ``.message`` attribute on Python 3
        # (accessing it raised AttributeError inside this handler);
        # str(err) carries the full botocore error description.
        raise RestError(
            400,
            str(err) +
            '. Please make sure the AWS Account and Assume Role are correct.')

    host_name = tac.CATEGORY_HOST_NAME_MAP[aws_account_category]
    connection = connect_s3(
        cred.aws_access_key_id,
        cred.aws_secret_access_key,
        self.getSessionKey(),
        host_name,
        security_token=cred.aws_session_token,
    )
    # timed(25, ...) — presumably bounds the bucket listing at 25 seconds
    # and returns whatever was collected; confirm against the helper.
    rs = timed(25, all_buckets, [], (connection,))
    for bucket in (r.name for r in rs):
        confInfo[bucket].append('bucket_name', bucket)
        confInfo[bucket].append('host_name', host_name)
def handleList(self, confInfo):
    """Emit the AWS regions available for the requested ``aws_service``,
    filtered by the account's region category when an account is given.

    :raises RestError: 400 for an unsupported service, or when no region
        is available for the account's category.
    """
    service = self.callerArgs.data['aws_service'][0]

    # Resolve the account category from the first recognized account arg.
    account_category = None
    for opt_arg in ACCOUNT_OPT_ARGS:
        if opt_arg in self.callerArgs.data:
            account = tacommon.get_account(
                get_splunkd_uri(),
                self.getSessionKey(),
                self.callerArgs.data[opt_arg][0],
            )
            account_category = account.category
            break

    # Map the service to its region list; boto imports stay lazy so only
    # the module for the requested service is loaded.
    if service == 'aws_cloudwatch':
        import boto.ec2.cloudwatch
        regions = boto.ec2.cloudwatch.regions()
    elif service == 'aws_cloudtrail':
        import boto.cloudtrail
        regions = boto.cloudtrail.regions()
    elif service == 'aws_config':
        # NOTE(review): SQS regions appear to be used as a stand-in for
        # AWS Config here — confirm this is intentional.
        import boto.sqs
        regions = boto.sqs.regions()
    elif service == 'aws_config_rule':
        import boto.configservice
        # FIXME, hard code for now
        regions = DummyRegion.from_names([
            'us-east-1', 'us-east-2', 'us-west-1', 'us-west-2',
            'ap-southeast-1', 'ap-southeast-2', 'ap-northeast-1',
            'ap-northeast-2', 'eu-central-1', 'eu-west-1',
        ])
    elif service == 'aws_cloudwatch_logs':
        import boto.logs
        regions = boto.logs.regions()
    elif service == 'aws_description':
        import boto.ec2
        regions = boto.ec2.regions()
    elif service == 'aws_inspector':
        regions = DummyRegion.from_names([
            'us-east-1', 'us-west-2', 'ap-northeast-2', 'ap-south-1',
            'ap-southeast-2', 'ap-northeast-1', 'eu-west-1',
        ])
    elif service == 'aws_kinesis':
        regions = DummyRegion.from_names(
            DUMMY_BOTO3_SESSION.get_available_regions('kinesis', 'aws')
            + DUMMY_BOTO3_SESSION.get_available_regions('kinesis', 'aws-us-gov')
            + DUMMY_BOTO3_SESSION.get_available_regions('kinesis', 'aws-cn')
        )
    elif service in ('aws_sqs_based_s3', 'splunk_ta_aws_sqs'):
        import boto.sqs
        regions = boto.sqs.regions()
    elif service == 'aws_s3':
        import boto.s3
        regions = boto.s3.regions()
    else:
        raise RestError(
            400, "Unsupported aws_service={} specified.".format(service))

    descriptions = _load_description_of_regions()
    for region in regions:
        # Classify the region's partition by its name.
        if 'us-gov-' in region.name:
            region_category = tac.RegionCategory.USGOV
        elif 'cn-' in region.name:
            region_category = tac.RegionCategory.CHINA
        else:
            region_category = tac.RegionCategory.COMMERCIAL
        # Without an account, list everything; otherwise only matching
        # partitions are visible.
        if account_category is None or region_category == account_category:
            confInfo[region.name].append(
                'label', descriptions[region.name]['description'])

    if len(confInfo) == 0:
        raise RestError(
            400, 'This service is not available for your AWS account.')