Exemplo n.º 1
0
    def fetch_password_policy(self, credentials):
        """
        Fetch the IAM account password policy and normalize a few fields.

        :param credentials: AWS credentials used to build the IAM client
        """
        counts = self.fetchstatuslogger.counts['password_policy']
        counts['discovered'] = 0
        counts['fetched'] = 0
        try:
            api_client = connect_service('iam', credentials, silent = True)
            policy = api_client.get_account_password_policy()['PasswordPolicy']
            self.password_policy = policy
            if 'PasswordReusePrevention' in policy:
                # Keep the raw reuse count and expose the setting as a boolean flag
                policy['PreviousPasswordPrevented'] = policy['PasswordReusePrevention']
                policy['PasswordReusePrevention'] = True
            else:
                policy['PasswordReusePrevention'] = False
            # There is a bug in the API: ExpirePasswords always returns false
            if 'MaxPasswordAge' in policy:
                policy['ExpirePasswords'] = True
            counts['discovered'] = 1
            counts['fetched'] = 1

        except ClientError as e:
            if e.response['Error']['Code'] != 'NoSuchEntity':
                raise e
            # No policy configured: record AWS's implicit defaults
            self.password_policy = {
                'MinimumPasswordLength': '1',  # As of 10/10/2016, 1-character passwords were authorized when no policy exists, even though the console displays 6
                'RequireUppercaseCharacters': False,
                'RequireLowercaseCharacters': False,
                'RequireNumbers': False,
                'RequireSymbols': False,
                'PasswordReusePrevention': False,
                'ExpirePasswords': False,
            }
        except Exception as e:
            printError(str(e))
Exemplo n.º 2
0
    def fetch(self, credentials, services = None, regions = None, partition_name = ''):
        """
        Fetch resource configurations for all (or a subset of) supported services.

        :param credentials:     AWS credentials passed to each service's fetch_all()
        :param services:        Optional list of service names to fetch (all when empty/None)
        :param regions:         Optional list of regions to restrict the fetch to
        :param partition_name:  AWS partition name (e.g. 'aws')
        :return:                None; per-service configurations are updated in place
        """
        # Avoid mutable default arguments: a shared default list could be
        # mutated downstream once passed into a service's fetch_all().
        services = [] if services is None else services
        regions = [] if regions is None else regions
        for service in vars(self):
            try:
                if services != [] and service not in services:
                    continue
                service_config = getattr(self, service)
                # Only process real service configs (they expose fetch_all)
                if 'fetch_all' in dir(service_config):
                    method_args = {'credentials': credentials}
                    # IAM is a global service and takes no region/partition arguments
                    if service != 'iam':
                        method_args['regions'] = regions
                        method_args['partition_name'] = partition_name
                    service_config.fetch_all(**method_args)
                    if hasattr(service_config, 'finalize'):
                        service_config.finalize()
            except Exception as e:
                printError('Error: could not fetch %s configuration.' % service)
                printException(e)
Exemplo n.º 3
0
def get_s3_acls(api_client, bucket_name, bucket, key_name = None):
    """
    Collect the ACL grants for a bucket (or a single key), keyed by grantee.

    :param api_client:  S3 API client
    :param bucket_name: Name of the bucket
    :param bucket:      Bucket configuration dict (unused here)
    :param key_name:    When set, fetch the object ACL instead of the bucket ACL
    :return:            Dict of grantees with display names and permissions
    """
    try:
        grantees = {}
        # Object-level ACL when a key is given, otherwise the bucket ACL
        if key_name:
            grants = api_client.get_object_acl(Bucket = bucket_name, Key = key_name)
        else:
            grants = api_client.get_bucket_acl(Bucket = bucket_name)
        for grant in grants['Grants']:
            grantee_info = grant['Grantee']
            if 'ID' in grantee_info:
                grantee = grantee_info['ID']
                display_name = grantee_info.get('DisplayName', grantee_info['ID'])
            elif 'URI' in grantee_info:
                # Canned group grants are identified by a URI
                grantee = grantee_info['URI'].split('/')[-1]
                display_name = s3_group_to_string(grantee_info['URI'])
            else:
                grantee = display_name = 'Unknown'
            manage_dictionary(grantees, grantee, {})
            grantees[grantee]['DisplayName'] = display_name
            if 'URI' in grantee_info:
                grantees[grantee]['URI'] = grantee_info['URI']
            manage_dictionary(grantees[grantee], 'permissions', init_s3_permissions())
            set_s3_permissions(grantees[grantee]['permissions'], grant['Permission'])
        return grantees
    except Exception as e:
        printError('Failed to get ACL configuration for %s: %s' % (bucket_name, e))
        return {}
Exemplo n.º 4
0
    def fetch_credential_report(self, credentials, ignore_exception = False):
        """
        Download and parse the IAM credential report into a per-user dictionary.

        :param credentials:         AWS credentials used to build the IAM client
        :param ignore_exception:    Silently return on failure (report creation
                                    is asynchronous and may not be ready yet)
        """
        iam_report = {}
        try:
            api_client = connect_service('iam', credentials, silent = True)
            response = api_client.generate_credential_report()
            if response['State'] != 'COMPLETE':
                # Report generation still in progress
                if not ignore_exception:
                    printError('Failed to generate a credential report.')
                return
            # The report content is a CSV blob; first row holds the column names
            content = api_client.get_credential_report()['Content']
            rows = content.splitlines()
            keys = rows[0].decode('utf-8').split(',')
            for row in rows[1:]:
                values = row.decode('utf-8').split(',')
                user = values[0]
                manage_dictionary(iam_report, user, {})
                iam_report[user].update(zip(keys, values))
            self.credential_report = iam_report
            self.fetchstatuslogger.counts['credential_report']['fetched'] = 1
        except Exception as e:
            if ignore_exception:
                return
            printError('Failed to download a credential report.')
            printException(e)
Exemplo n.º 5
0
def __update_bucket_permissions(s3_info, iam_info, action, iam_entity, allowed_iam_entity, full_path, policy_type, policy_name):
    """
    Propagate an IAM policy's NotResource S3 statements onto the buckets the
    policy still allows access to.

    :param s3_info:            S3 service configuration (holds 'buckets')
    :param iam_info:           IAM service configuration
    :param action:             IAM action being processed
    :param iam_entity:         Type of IAM entity (user, group, role)
    :param allowed_iam_entity: Name of the IAM entity the policy applies to
    :param full_path:          NotResource path recorded in the permissions tree
    :param policy_type:        'InlinePolicies' or 'ManagedPolicies'
    :param policy_name:        Name of the policy being processed
    """
    # By default, all buckets are allowed
    allowed_buckets = list(s3_info['buckets'])
    if policy_type == 'InlinePolicies':
        policy = iam_info[iam_entity.title()][allowed_iam_entity]['Policies'][policy_name]['PolicyDocument']
    elif policy_type == 'ManagedPolicies':
        policy = iam_info['ManagedPolicies'][policy_name]['PolicyDocument']
    else:
        printError('Error, found unknown policy type.')
        # Bail out: without a known policy type there is no policy document to
        # read (previously this fell through and crashed on the unbound name).
        return
    for statement in policy['Statement']:
        for target_path in statement['NotResource']:
            parts = target_path.split('/')
            bucket_name = parts[0].split(':')[-1]
            path = '/' + '/'.join(parts[1:]) if len(parts) > 1 else '/'
            if (path == '/' or path == '/*') and (bucket_name in allowed_buckets):
                # Bucket is excluded by the NotResource clause
                allowed_buckets.remove(bucket_name)
            elif bucket_name == '*':
                # A wildcard NotResource excludes every bucket
                allowed_buckets = []
    policy_info = {policy_type: {policy_name: iam_info['permissions']['Action'][action][iam_entity]['Allow'][allowed_iam_entity]['NotResource'][full_path][policy_type][policy_name]}}
    for bucket_name in allowed_buckets:
        __update_iam_permissions(s3_info, bucket_name, iam_entity, allowed_iam_entity, policy_info)
Exemplo n.º 6
0
    def load(self, rule_type, quiet = False):
        """
        Open a JSON file defining a ruleset and load it into a Ruleset object

        :param rule_type:   Type of rules being loaded, forwarded to handle_rule_versions
        :param quiet:       When True, do not report a missing ruleset file
        :return:
        """
        if self.filename and os.path.exists(self.filename):
            try:
                with open(self.filename, 'rt') as f:
                    ruleset = json.load(f)
                    self.about = ruleset['about'] if 'about' in ruleset else ''
                    # Rules are grouped by the file they were defined in
                    self.rules = {}
                    for filename in ruleset['rules']:
                        self.rules[filename] = []
                        for rule in ruleset['rules'][filename]:
                            self.handle_rule_versions(filename, rule_type, rule)
            except Exception as e:
                printException(e)
                printError('Error: ruleset file %s contains malformed JSON.' % self.filename)
                # NOTE(review): the failure paths reset self.rules to a list
                # while the success path builds a dict; both iterate the same
                # when empty, but consumers should not depend on the type.
                self.rules = []
                self.about = ''
        else:
            self.rules = []
            if not quiet:
                printError('Error: the file %s does not exist.' % self.filename)
Exemplo n.º 7
0
def get_s3_bucket_policy(api_client, bucket_name, bucket_info):
    """
    Fetch a bucket's policy and store the parsed document in bucket_info.

    :param api_client:  S3 API client
    :param bucket_name: Name of the bucket
    :param bucket_info: Dict updated in place (bucket_info['policy'])
    :return:            True when a policy was fetched, False otherwise
    """
    try:
        bucket_info['policy'] = json.loads(api_client.get_bucket_policy(Bucket = bucket_name)['Policy'])
        return True
    # Catch ClientError directly: the original `type(e) == ClientError` check
    # would treat ClientError subclasses as unexpected errors.
    except ClientError as e:
        # Buckets without a policy are expected; only report other errors
        if e.response['Error']['Code'] != 'NoSuchBucketPolicy':
            printError('Failed to get bucket policy for %s: %s' % (bucket_name, e))
        return False
    except Exception as e:
        printError('Failed to get bucket policy for %s: %s' % (bucket_name, e))
        return False
Exemplo n.º 8
0
def get_s3_bucket_logging(api_client, bucket_name, bucket_info):
    """
    Fetch a bucket's access-logging configuration.

    :param api_client:  S3 API client
    :param bucket_name: Name of the bucket
    :param bucket_info: Dict updated in place ('logging', 'logging_stuff')
    :return:            True on success, False otherwise ('logging' set to 'Unknown')
    """
    try:
        # Renamed from `logging`: that name shadows the stdlib logging module
        logging_config = api_client.get_bucket_logging(Bucket = bucket_name)
        if 'LoggingEnabled' in logging_config:
            # Record the target as "bucket/prefix"
            bucket_info['logging'] = logging_config['LoggingEnabled']['TargetBucket'] + '/' + logging_config['LoggingEnabled']['TargetPrefix']
            bucket_info['logging_stuff'] = logging_config
        else:
            bucket_info['logging'] = 'Disabled'
        return True
    except Exception as e:
        printError('Failed to get logging configuration for %s: %s' % (bucket_name, e))
        bucket_info['logging'] = 'Unknown'
        return False
Exemplo n.º 9
0
def get_s3_bucket_default_encryption(api_client, bucket_name, bucket_info):
    """
    Determine whether default (server-side) encryption is enabled on a bucket.

    :param api_client:  S3 API client
    :param bucket_name: Name of the bucket
    :param bucket_info: Dict updated in place ('default_encryption')
    :return:            True when the status was determined, False otherwise
    """
    try:
        # The call raises when no default-encryption configuration exists;
        # the response body itself is not needed (unused local removed).
        api_client.get_bucket_encryption(Bucket = bucket_name)
        bucket_info['default_encryption'] = 'Enabled'
        return True
    except ClientError as e:
        if 'ServerSideEncryptionConfigurationNotFoundError' in e.response['Error']['Code']:
            # Expected error when the bucket has no default encryption
            bucket_info['default_encryption'] = 'Disabled'
            return True
        printError('Failed to get encryption configuration for %s: %s' % (bucket_name, e))
        bucket_info['default_encryption'] = 'Unknown'
        return False
    except Exception as e:
        printError('Failed to get encryption configuration for %s: %s' % (bucket_name, e))
        bucket_info['default_encryption'] = 'Unknown'
        return False
Exemplo n.º 10
0
def pass_conditions(all_info, current_path, conditions, unknown_as_pass_condition = False):
    """
    Pass all conditions?

    The first element of `conditions` is the boolean operator ('and'/'or');
    each remaining element is either a nested condition list or a
    ["path to value", "type of test", "value(s) for test"] triplet.

    :param all_info:
    :param current_path:
    :param conditions:
    :param unknown_as_pass_condition:   Consider an undetermined condition as passed
    :return:                            True when the conditions pass
    """
    if len(conditions) == 0:
        return True
    # Read the operator without pop(0) so the caller's list is not mutated
    # (callers previously had to deep-copy conditions to work around this)
    condition_operator = conditions[0]
    for condition in conditions[1:]:
        if condition[0] in condition_operators:
            # Nested condition list: evaluate recursively
            res = pass_conditions(all_info, current_path, condition, unknown_as_pass_condition)
        else:
            # Conditions are formed as "path to value", "type of test", "value(s) for test"
            path_to_value, test_name, test_values = condition
            path_to_value = fix_path_string(all_info, current_path, path_to_value)
            target_obj = get_value_at(all_info, current_path, path_to_value)
            if type(test_values) != list:
                # A scalar test value may reference another config value
                dynamic_value = re_get_value_at.match(test_values)
                if dynamic_value:
                    test_values = get_value_at(all_info, current_path, dynamic_value.groups()[0], True)
            try:
                res = pass_condition(target_obj, test_name, test_values)
            except Exception as e:
                # An unprocessable testcase counts as pass or fail per the flag
                res = True if unknown_as_pass_condition else False
                printError('Unable to process testcase \'%s\' on value \'%s\', interpreted as %s.' % (test_name, str(target_obj), res))
                printException(e, True)
        # Quick exit and + false
        if condition_operator == 'and' and not res:
            return False
        # Quick exit or + true
        if condition_operator == 'or' and res:
            return True
    # Exhausted all conditions: 'or' never matched, 'and' never failed
    return condition_operator != 'or'
Exemplo n.º 11
0
 def parse_parameter_group(self, global_params, region, parameter_group):
     """
     Parse a DB parameter group, keeping only modifiable parameters, and store
     it under a Scout-generated identifier.

     :param global_params:   Parameters shared across all regions (unused here)
     :param region:          Region the parameter group lives in
     :param parameter_group: Raw parameter group dict, modified in place
     """
     parameter_group['arn'] = parameter_group.pop('DBParameterGroupArn')
     parameter_group['name'] = parameter_group.pop('DBParameterGroupName')
     api_client = api_clients[region]
     try:
         group_name = parameter_group['name']
         parameters = handle_truncated_response(api_client.describe_db_parameters, {'DBParameterGroupName': group_name}, ['Parameters'])['Parameters']
         manage_dictionary(parameter_group, 'parameters', {})
         for parameter in parameters:
             # Discard non-modifiable parameters
             if parameter['IsModifiable']:
                 parameter_group['parameters'][parameter.pop('ParameterName')] = parameter
     except Exception as e:
         printException(e)
         printError('Failed fetching DB parameters for %s' % parameter_group['name'])
     # Save under a non-AWS identifier derived from the group name
     self.parameter_groups[self.get_non_aws_id(parameter_group['name'])] = parameter_group
Exemplo n.º 12
0
def recurse(all_info, current_info, target_path, current_path, config, add_suffix = False):
    """
    Recursively walk current_info along target_path and return the dotted
    paths of items that match the rule's conditions.

    :param all_info:        Full configuration (used to resolve condition paths)
    :param current_info:    Sub-tree currently being walked
    :param target_path:     Remaining path components to traverse
    :param current_path:    Path components traversed so far
    :param config:          Rule object (conditions, checked_items, id_suffix, ...)
    :param add_suffix:      Append the rule's id_suffix to flagged paths
    :return:                List of dotted path strings for flagged items
    """
    results = []
    if len(target_path) == 0:
        # Dashboard: count the number of processed resources here
        setattr(config, 'checked_items', getattr(config, 'checked_items') + 1)
        # Test for conditions... (deep copy because pass_conditions may
        # mutate the conditions list it is given)
        if pass_conditions(all_info, current_path, copy.deepcopy(config.conditions)):
            if add_suffix and hasattr(config, 'id_suffix'):
                suffix = fix_path_string(all_info, current_path, config.id_suffix)
                current_path.append(suffix)
            results.append('.'.join(current_path))
        # Return the flagged items...
        return results
    # Work on copies so sibling branches do not see each other's mutations
    target_path = copy.deepcopy(target_path)
    dbg_target_path = copy.deepcopy(target_path)  # kept intact for error reporting
    current_path = copy.deepcopy(current_path)
    attribute = target_path.pop(0)
    if type(current_info) == dict:
        if attribute in current_info:
            split_path = copy.deepcopy(current_path)
            split_path.append(attribute)
            results = results + recurse(all_info, current_info[attribute], target_path, split_path, config, add_suffix)
        elif attribute == 'id':
            # 'id' acts as a wildcard: recurse into every key of the dict
            for key in current_info:
                split_target_path = copy.deepcopy(target_path)
                split_current_path = copy.deepcopy(current_path)
                split_current_path.append(key)
                split_current_info = current_info[key]
                results = results + recurse(all_info, split_current_info, split_target_path, split_current_path, config, add_suffix)
    # To handle lists properly, I would have to make sure the list is properly ordered and I can use the index to consistently access an object... Investigate (or do not use lists)
    elif type(current_info) == list:
        for index, split_current_info in enumerate(current_info):
            split_current_path = copy.deepcopy(current_path)
            split_current_path.append(str(index))
            results = results + recurse(all_info, split_current_info, copy.deepcopy(target_path), split_current_path, config, add_suffix)
    else:
        printError('Error: unhandled case, typeof(current_info) = %s' % type(current_info))
        printError('Path: %s' % current_path)
        printError('Object: %s' % str(current_info))
        printError('Entry target path: %s' % str(dbg_target_path))
        # Raise with context instead of a bare Exception so upstream logs are useful
        raise Exception('Unhandled case, typeof(current_info) = %s' % type(current_info))
    return results
Exemplo n.º 13
0
def get_s3_bucket_secure_transport(api_client, bucket_name, bucket_info):
    """
    Check whether the bucket policy enforces HTTPS-only (secure transport) access.

    :param api_client:  S3 API client (unused; the policy is read from bucket_info)
    :param bucket_name: Name of the bucket (used in error messages)
    :param bucket_info: Dict with an optional 'policy'; updated in place
    :return:            True on success, False otherwise
    """
    try:
        if 'policy' not in bucket_info:
            # Without a policy there is nothing denying insecure transport
            bucket_info['secure_transport'] = 'Disabled'
            return True
        bucket_info['secure_transport'] = 'Disabled'
        for statement in bucket_info['policy']['Statement']:
            # evaluate statement to see if it contains a condition disallowing HTTP transport
            # TODO this might not cover all cases
            bool_condition = statement.get('Condition', {}).get('Bool', {})
            if 'aws:SecureTransport' in bool_condition and \
                    bool_condition['aws:SecureTransport'] == 'false' and \
                    statement['Effect'] == 'Deny':
                bucket_info['secure_transport'] = 'Enabled'
        return True
    except Exception as e:
        printError('Failed to get evaluate bucket policy for %s: %s' % (bucket_name, e))
        bucket_info['secure_transport'] = 'Unknown'
        return False
Exemplo n.º 14
0
    def run(self, aws_config, skip_dashboard = False):
        """
        Execute every enabled rule of the ruleset against aws_config.

        :param aws_config:      Full AWS configuration dict, updated in place
        :param skip_dashboard:  Skip dashboard metadata (counts, names) when True
        """
        rule_type = self.ruleset.rule_type
        # Clean up existing findings
        for service in aws_config['services']:
            aws_config['services'][service][rule_type] = {}

        # Process each rule
        for finding_path in self.rules:
            for rule in self.rules[finding_path]:

                if not rule.enabled:  # or rule.service not in []: # TODO: handle this...
                    continue

                printDebug('Processing %s rule[%s]: "%s"' % (rule.service, rule.filename, rule.description))
                path = rule.path.split('.')
                service = path[0]
                manage_dictionary(aws_config['services'][service], rule_type, {})
                # Alias the deeply nested per-rule results dict for readability;
                # all writes below mutate aws_config in place through it
                aws_config['services'][service][rule_type][rule.key] = {}
                results = aws_config['services'][service][rule_type][rule.key]
                results['description'] = rule.description
                results['path'] = rule.path
                for attr in ['level', 'id_suffix', 'display_path']:
                    if hasattr(rule, attr):
                        results[attr] = getattr(rule, attr)
                try:
                    setattr(rule, 'checked_items', 0)
                    results['items'] = recurse(aws_config['services'], aws_config['services'], path, [], rule, True)
                    if skip_dashboard:
                        continue
                    results['dashboard_name'] = rule.dashboard_name
                    results['checked_items'] = rule.checked_items
                    results['flagged_items'] = len(results['items'])
                    results['service'] = rule.service
                    results['rationale'] = rule.rationale if hasattr(rule, 'rationale') else 'N/A'
                except Exception as e:
                    printException(e)
                    printError('Failed to process rule defined in %s' % rule.filename)
                    # Fallback if process rule failed to ensure report creation and data dump still happen
                    results['checked_items'] = 0
                    results['flagged_items'] = 0
Exemplo n.º 15
0
 def fetch_password_policy(self, credentials):
     """
     Fetch the password policy that applies to all IAM users within the AWS account

     :param credentials: AWS credentials used to build the IAM client
     """
     self.fetchstatuslogger.counts['password_policy']['discovered'] = 0
     self.fetchstatuslogger.counts['password_policy']['fetched'] = 0
     try:
         api_client = connect_service('iam', credentials)
         self.password_policy = api_client.get_account_password_policy()['PasswordPolicy']
         if 'PasswordReusePrevention' not in self.password_policy:
             self.password_policy['PasswordReusePrevention'] = False
         else:
             # Keep the raw reuse count and normalize the flag to a boolean
             self.password_policy['PreviousPasswordPrevented'] = self.password_policy['PasswordReusePrevention']
             self.password_policy['PasswordReusePrevention'] = True
         # There is a bug in the API: ExpirePasswords always returns false
         if 'MaxPasswordAge' in self.password_policy:
             self.password_policy['ExpirePasswords'] = True
         self.fetchstatuslogger.counts['password_policy']['discovered'] = 1
         self.fetchstatuslogger.counts['password_policy']['fetched'] = 1
     # Catch ClientError directly: the original `type(e) == ClientError`
     # comparison would misroute ClientError subclasses to the generic handler
     except botocore.exceptions.ClientError as e:
         if e.response['Error']['Code'] == 'NoSuchEntity':
             # No policy configured: record AWS's implicit defaults
             self.password_policy = {
                 'MinimumPasswordLength': '1',  # As of 10/10/2016, 1-character passwords were authorized when no policy exists, even though the console displays 6
                 'RequireUppercaseCharacters': False,
                 'RequireLowercaseCharacters': False,
                 'RequireNumbers': False,
                 'RequireSymbols': False,
                 'PasswordReusePrevention': False,
                 'ExpirePasswords': False,
             }
         else:
             printError("Unexpected error: %s" % e)
     except Exception as e:
         printError(str(e))
Exemplo n.º 16
0
def sort_vpc_flow_logs_callback(vpc_config, current_config, path, current_path,
                                flow_log_id, callback_args):
    """
    Attach a flow log id to the VPC (and all its subnets) or the single subnet
    it targets.

    :param vpc_config:      Service configuration tree
    :param current_config:  Flow log configuration (holds 'ResourceId')
    :param path:            Unused (callback signature requirement)
    :param current_path:    Path to the flow log in the configuration tree
    :param flow_log_id:     Id of the flow log being sorted
    :param callback_args:   Unused (callback signature requirement)
    """
    attached_resource = current_config['ResourceId']
    if attached_resource.startswith('vpc-'):
        # VPC-level flow log: record it on the VPC and on every subnet
        vpc_path = combine_paths(current_path[0:2],
                                 ['vpcs', attached_resource])
        try:
            attached_vpc = get_object_at(vpc_config, vpc_path)
        except Exception:
            printDebug(
                'It appears that the flow log %s is attached to a resource that was previously deleted (%s).'
                % (flow_log_id, attached_resource))
            return
        manage_dictionary(attached_vpc, 'flow_logs', [])
        if flow_log_id not in attached_vpc['flow_logs']:
            attached_vpc['flow_logs'].append(flow_log_id)
        for subnet_id in attached_vpc['subnets']:
            subnet = attached_vpc['subnets'][subnet_id]
            manage_dictionary(subnet, 'flow_logs', [])
            if flow_log_id not in subnet['flow_logs']:
                subnet['flow_logs'].append(flow_log_id)
    elif attached_resource.startswith('subnet-'):
        # Subnet-level flow log: locate the parent VPC first
        all_vpcs = get_object_at(vpc_config,
                                 combine_paths(current_path[0:2], ['vpcs']))
        for vpc in all_vpcs:
            if attached_resource in all_vpcs[vpc]['subnets']:
                subnet = all_vpcs[vpc]['subnets'][attached_resource]
                manage_dictionary(subnet, 'flow_logs', [])
                if flow_log_id not in subnet['flow_logs']:
                    subnet['flow_logs'].append(flow_log_id)
                break
    else:
        printError('Resource %s attached to flow logs is not handled' %
                   attached_resource)
Exemplo n.º 17
0
 def parse_parameter_group(self, global_params, region, parameter_group):
     """
     Parse a single DB parameter group and record each parameter's value/source.

     :param global_params:   Parameters shared across all regions (unused here)
     :param region:          Region the parameter group lives in
     :param parameter_group: Raw parameter group dict, modified in place
     """
     parameter_group['arn'] = parameter_group.pop('DBParameterGroupArn')
     parameter_group['name'] = parameter_group.pop('DBParameterGroupName')
     api_client = api_clients[region]
     try:
         parameters = handle_truncated_response(
             api_client.describe_db_parameters,
             {'DBParameterGroupName': parameter_group['name']},
             ['Parameters'])['Parameters']
         # Initialize once, outside the loop: the call is loop-invariant and
         # hoisting it guarantees the 'parameters' key exists even when the
         # group has no parameters (matches the sibling implementations)
         manage_dictionary(parameter_group, 'parameters', {})
         for parameter in parameters:
             param = {
                 'value': parameter.get('ParameterValue'),
                 'source': parameter['Source'],
             }
             parameter_group['parameters'][parameter['ParameterName']] = param
     except Exception as e:
         printException(e)
         printError('Failed fetching DB parameters for %s' %
                    parameter_group['name'])
     # Save
     self.parameter_groups[parameter_group['name']] = parameter_group
Exemplo n.º 18
0
 def parse_parameter_group(self, global_params, region, parameter_group):
     """
     Parse a DB parameter group, keeping only modifiable parameters, and store
     it under a Scout-generated (non-provider) identifier.

     :param global_params:   Parameters shared across all regions (unused here)
     :param region:          Region the parameter group lives in
     :param parameter_group: Raw parameter group dict, modified in place
     """
     parameter_group['arn'] = parameter_group.pop('DBParameterGroupArn')
     group_name = parameter_group.pop('DBParameterGroupName')
     parameter_group['name'] = group_name
     api_client = api_clients[region]
     try:
         response = handle_truncated_response(
             api_client.describe_db_parameters,
             {'DBParameterGroupName': group_name},
             ['Parameters'])
         manage_dictionary(parameter_group, 'parameters', {})
         for parameter in response['Parameters']:
             # Discard non-modifiable parameters
             if parameter['IsModifiable']:
                 parameter_name = parameter.pop('ParameterName')
                 parameter_group['parameters'][parameter_name] = parameter
     except Exception as e:
         printException(e)
         printError('Failed fetching DB parameters for %s' %
                    parameter_group['name'])
     # Save
     self.parameter_groups[self.get_non_provider_id(group_name)] = parameter_group
Exemplo n.º 19
0
def set_emr_vpc_ids_callback(aws_config, current_config, path, current_path, vpc_id, callback_args):
    """
    Resolve the VPC id of each EMR cluster filed under the 'TODO' placeholder
    VPC and re-parent the cluster under its actual VPC.

    :param aws_config:      Full AWS configuration dict
    :param current_config:  Region's EMR config ({'clusters': {...}})
    :param path:            Unused (callback signature requirement)
    :param current_path:    Path to the current config (region name at index 3)
    :param vpc_id:          Placeholder VPC id; only 'TODO' entries are processed
    :param callback_args:   Holds 'clear_list' of regions whose clusters were all moved
    """
    if vpc_id != 'TODO':
        return
    region = current_path[3]
    pop_list = []
    for cluster_id in current_config['clusters']:
        # Reset per cluster: the original initialized these once before the
        # loop, so an sg_id/subnet_id from a previous iteration could leak
        # into the lookup for the current cluster.
        vpc_id = sg_id = subnet_id = None
        cluster = current_config['clusters'][cluster_id]
        if 'EmrManagedMasterSecurityGroup' in cluster['Ec2InstanceAttributes']:
            sg_id = cluster['Ec2InstanceAttributes']['EmrManagedMasterSecurityGroup']
        elif 'RequestedEc2SubnetIds' in cluster['Ec2InstanceAttributes']:
            subnet_id = cluster['Ec2InstanceAttributes']['RequestedEc2SubnetIds']
        else:
            printError('Unable to determine VPC id for EMR cluster %s' % str(cluster_id))
            continue
        if sg_id in sg_map:
            vpc_id = sg_map[sg_id]['vpc_id']
            pop_list.append(cluster_id)
        else:
            sid_found = False
            if subnet_id:
                for sid in subnet_id:
                    if sid in subnet_map:
                        vpc_id = subnet_map[sid]['vpc_id']
                        # Append only once even when several subnets match,
                        # otherwise the pop() below would raise on duplicates
                        if not sid_found:
                            pop_list.append(cluster_id)
                        sid_found = True
            if not sid_found:
                printError('Unable to determine VPC id for %s' % (str(subnet_id) if subnet_id else str(sg_id)))
                continue
        if vpc_id:
            region_vpcs_config = get_object_at(aws_config, current_path)
            manage_dictionary(region_vpcs_config, vpc_id, {'clusters': {}})
            region_vpcs_config[vpc_id]['clusters'][cluster_id] = cluster
    for cluster_id in pop_list:
        current_config['clusters'].pop(cluster_id)
    if len(current_config['clusters']) == 0:
        callback_args['clear_list'].append(region)
Exemplo n.º 20
0
def prepare_cloudformation_params(stack_name,
                                  template_path,
                                  template_parameters,
                                  resource_type,
                                  tags=None,
                                  need_on_failure=False):
    """
    Build the keyword-argument dict for a CloudFormation create/update call.

    :param stack_name:          Name of the stack (or other resource)
    :param template_path:       Path to the CloudFormation template on disk
    :param template_parameters: Flat list alternating parameter keys and values
    :param resource_type:       Resource type used to build the '<Type>Name' key
    :param tags:                Optional list of tags to attach
    :param need_on_failure:     Add OnFailure=ROLLBACK when True
    :return:                    Dict of parameters for the CloudFormation API call
    """
    tags = [] if tags is None else tags  # avoid a shared mutable default argument
    printDebug('Reading CloudFormation template from %s' % template_path)
    template_body = read_file(template_path)
    params = {}
    params['%sName' % resource_type] = stack_name
    params['TemplateBody'] = template_body
    if len(template_parameters):
        params['Parameters'] = []
        # Consume the flat list pairwise: key, value, key, value, ...
        it = iter(template_parameters)
        for param in it:
            printError('Param:: %s' % param)  # NOTE(review): looks like leftover debug output; consider printDebug
            params['Parameters'].append({
                'ParameterKey': param,
                'ParameterValue': next(it)
            })

    if len(tags):
        params['Tags'] = tags
    # Templates creating named IAM resources require an explicit capability
    if re_iam_capability.match(template_body):
        params['Capabilities'] = ['CAPABILITY_NAMED_IAM']
    if need_on_failure:
        params['OnFailure'] = 'ROLLBACK'
    return params
Exemplo n.º 21
0
def set_emr_vpc_ids_callback(aws_config, current_config, path, current_path, vpc_id, callback_args):
    """
    Resolve the VPC id of each EMR cluster filed under the 'TODO' placeholder
    VPC and re-parent the cluster under its actual VPC.

    :param aws_config:      Full AWS configuration dict
    :param current_config:  Region's EMR config ({'clusters': {...}})
    :param path:            Unused (callback signature requirement)
    :param current_path:    Path to the current config (region name at index 3)
    :param vpc_id:          Placeholder VPC id; only 'TODO' entries are processed
    :param callback_args:   Holds 'clear_list' of regions whose clusters were all moved
    """
    if vpc_id != 'TODO':
        return
    region = current_path[3]
    pop_list = []
    for cluster_id in current_config['clusters']:
        # Reset per cluster: the original initialized these once before the
        # loop, so an sg_id/subnet_id from a previous iteration could leak
        # into the lookup for the current cluster.
        vpc_id = sg_id = subnet_id = None
        cluster = current_config['clusters'][cluster_id]
        if 'EmrManagedMasterSecurityGroup' in cluster['Ec2InstanceAttributes']:
            sg_id = cluster['Ec2InstanceAttributes']['EmrManagedMasterSecurityGroup']
        elif 'RequestedEc2SubnetIds' in cluster['Ec2InstanceAttributes']:
            subnet_id = cluster['Ec2InstanceAttributes']['RequestedEc2SubnetIds']
        else:
            printError('Unable to determine VPC id for EMR cluster %s' % str(cluster_id))
            continue
        if sg_id in sg_map:
            vpc_id = sg_map[sg_id]['vpc_id']
            pop_list.append(cluster_id)
        else:
            sid_found = False
            if subnet_id:
                for sid in subnet_id:
                    if sid in subnet_map:
                        vpc_id = subnet_map[sid]['vpc_id']
                        # Append only once even when several subnets match,
                        # otherwise the pop() below would raise on duplicates
                        if not sid_found:
                            pop_list.append(cluster_id)
                        sid_found = True
            if not sid_found:
                printError('Unable to determine VPC id for %s' % (str(subnet_id) if subnet_id else str(sg_id)))
                continue
        if vpc_id:
            region_vpcs_config = get_object_at(aws_config, current_path)
            manage_dictionary(region_vpcs_config, vpc_id, {'clusters': {}})
            region_vpcs_config[vpc_id]['clusters'][cluster_id] = cluster
    for cluster_id in pop_list:
        current_config['clusters'].pop(cluster_id)
    if len(current_config['clusters']) == 0:
        callback_args['clear_list'].append(region)
Exemplo n.º 22
0
def get_s3_bucket_secure_transport(api_client, bucket_name, bucket_info):
    """
    Flag whether the bucket policy addresses secure (HTTPS-only) transport.

    Sets bucket_info['secure_transport'] to 'Enabled' when a policy statement
    either denies requests with aws:SecureTransport == 'false' or allows only
    requests with aws:SecureTransport == 'true'; 'Disabled' when no such
    statement (or no policy) exists; 'Unknown' when evaluation fails.

    :param api_client:      unused here; kept for signature parity with siblings
    :param bucket_name:     bucket name, used only for error reporting
    :param bucket_info:     bucket configuration dict, updated in place
    :return:                True when the policy was evaluated, False on error
    """
    try:
        bucket_info['secure_transport'] = 'Disabled'
        if 'policy' not in bucket_info:
            return True
        for statement in bucket_info['policy']['Statement']:
            # evaluate statement to see if it contains a condition disallowing HTTP transport
            # TODO this might not cover all cases
            if 'Condition' not in statement or 'Bool' not in statement['Condition']:
                continue
            bool_condition = statement['Condition']['Bool']
            if 'aws:SecureTransport' not in bool_condition:
                continue
            transport = bool_condition['aws:SecureTransport']
            effect = statement['Effect']
            denies_http = (transport == 'false' and effect == 'Deny')
            allows_https_only = (transport == 'true' and effect == 'Allow')
            if denies_http or allows_https_only:
                bucket_info['secure_transport'] = 'Enabled'
        return True
    except Exception as e:
        printError('Failed to get evaluate bucket policy for %s: %s' %
                   (bucket_name, e))
        bucket_info['secure_transport'] = 'Unknown'
        return False
Exemplo n.º 23
0
def sort_vpc_flow_logs_callback(aws_config, current_config, path, current_path, flow_log_id, callback_args):
    """
    Attach a flow log id to the VPC or subnet it monitors.

    VPC-level flow logs are recorded on the VPC and on each of its subnets;
    subnet-level flow logs are recorded on the subnet only. Flow logs attached
    to any other resource type are reported as unhandled.

    :param aws_config:      full AWS configuration object
    :param current_config:  flow log configuration (holds 'ResourceId')
    :param path:            unused (callback signature)
    :param current_path:    path to current_config; first 4 parts locate the region
    :param flow_log_id:     id of the flow log being sorted
    :param callback_args:   unused (callback signature)
    """
    resource_id = current_config['ResourceId']
    if resource_id.startswith('vpc-'):
        vpc_path = combine_paths(current_path[0:4], ['vpcs', resource_id])
        try:
            vpc = get_object_at(aws_config, vpc_path)
        except Exception:
            # The monitored VPC no longer exists in the configuration
            printDebug('It appears that the flow log %s is attached to a resource that was previously deleted (%s).' % (flow_log_id, resource_id))
            return
        manage_dictionary(vpc, 'flow_logs', [])
        if flow_log_id not in vpc['flow_logs']:
            vpc['flow_logs'].append(flow_log_id)
        # A VPC-level flow log covers every subnet of the VPC
        for subnet in vpc['subnets'].values():
            manage_dictionary(subnet, 'flow_logs', [])
            if flow_log_id not in subnet['flow_logs']:
                subnet['flow_logs'].append(flow_log_id)
    elif resource_id.startswith('subnet-'):
        subnet_path = combine_paths(current_path[0:4], ['vpcs', subnet_map[resource_id]['vpc_id'], 'subnets', resource_id])
        subnet = get_object_at(aws_config, subnet_path)
        manage_dictionary(subnet, 'flow_logs', [])
        if flow_log_id not in subnet['flow_logs']:
            subnet['flow_logs'].append(flow_log_id)
    else:
        printError('Resource %s attached to flow logs is not handled' % resource_id)
Exemplo n.º 24
0
def wait_for_operation(api_client,
                       stack_set_name,
                       operation_id,
                       timeout=5 * 60,
                       increment=5):
    """
    Poll a CloudFormation stack set operation until it settles or times out.

    Blocks until the operation leaves the RUNNING/STOPPING states, sleeping
    `increment` seconds between checks, for at most `timeout` seconds.

    :param api_client:      CloudFormation API client
    :param stack_set_name:  name of the stack set the operation belongs to
    :param operation_id:    id of the operation to wait for
    :param timeout:         maximum number of seconds to wait
    :param increment:       seconds to sleep between two status checks
    :return:                summary string with the operation's last known status
    """
    printDebug('Waiting for operation %s on stack set %s...' %
               (operation_id, stack_set_name))
    status = ''
    elapsed = 0
    # The else clause runs only when the loop condition expires (timeout),
    # not when we break out on a settled status.
    while elapsed < timeout:
        operation = api_client.describe_stack_set_operation(
            StackSetName=stack_set_name, OperationId=operation_id)
        status = operation['StackSetOperation']['Status']
        if status not in ('RUNNING', 'STOPPING'):
            break
        printError(
            'Operation status is \'%s\'... waiting %d seconds until next check...'
            % (status, increment))
        time.sleep(increment)
        elapsed += increment
    else:
        printError('Timed out.')
    return 'Operation %s is %s' % (operation_id, status)
Exemplo n.º 25
0
    def load(self):
        """
        Load the definition of the rule, searching in the specified rule dirs
        first, then in the built-in definitions.

        On success, sets self.file_path and self.string_definition and calls
        self.load_from_string_definition(); otherwise prints an error.

        :return:                        None
        """
        file_name_valid = False
        rule_type_valid = False
        file_path = None
        # Look for a locally-defined rule
        for rule_dir in self.rule_dirs:
            try:
                file_path = os.path.join(
                    rule_dir, self.file_name) if rule_dir else self.file_name
            except Exception as e:
                # Bug fix: the original used '%e', which is not a valid
                # conversion for an exception object and raised a TypeError
                # while trying to report the failure
                printError('Failed to load file %s: %s' % (self.file_name, e))
                # Bug fix: skip the isfile() probe when no path was built for
                # this rule_dir (file_path would be stale or None)
                continue
            if os.path.isfile(file_path):
                self.file_path = file_path
                file_name_valid = True
                break
        # Look for a built-in rule
        if not file_name_valid:
            # First attempt: file name already prefixed with a known rule type
            for rule_type in self.rule_types:
                if self.file_name.startswith(rule_type):
                    self.file_path = os.path.join(self.rules_data_path,
                                                  self.file_name)
                    rule_type_valid = True
                    file_name_valid = True
                    break
            if not rule_type_valid:
                # Second attempt: look under each rule-type subdirectory
                for rule_type in self.rule_types:
                    self.file_path = os.path.join(self.rules_data_path,
                                                  rule_type, self.file_name)
                    if os.path.isfile(self.file_path):
                        file_name_valid = True
                        break
            else:
                # NOTE(review): file_name_valid is already True on this branch,
                # so a missing built-in file is not caught here; the open()
                # below will report the failure instead
                if os.path.isfile(self.file_path):
                    file_name_valid = True
        if not file_name_valid:
            printError('Error: could not find %s' % self.file_name)
        else:
            try:
                with open(self.file_path, 'rt') as f:
                    self.string_definition = f.read()
                    self.load_from_string_definition()
            except Exception as e:
                # printException(e)
                printError('Failed to load rule defined in %s: %s' %
                           (self.file_name, e))
Exemplo n.º 26
0
def check_versions(min_version, installed_version, max_version, package_name, strict = False):
    """
    Check that the installed version of a package falls within supported bounds.

    :param min_version:         lowest supported version; falsy value disables all checks
    :param installed_version:   version string of the installed package
    :param max_version:         lowest version known NOT to be supported, if any
    :param package_name:        package name, used in messages
    :param strict:              when True, treat the too-recent warning as an error

    :return:                    True when the installed version is acceptable
    """
    if not min_version:
        # If no minimum version was specified, pass
        return True
    if StrictVersion(installed_version) < StrictVersion(min_version):
        # Bug fix: report the installed version of the package being checked,
        # not OPINEL_VERSION (which is unrelated to package_name)
        printError('Error: the version of %s installed on this system (%s) is too old. You need at least version %s to run this tool.' % (package_name, installed_version, min_version))
        return False
    if max_version and StrictVersion(installed_version) >= StrictVersion(max_version):
        # Typo fix: 'ther' -> 'the'
        printError('Warning: the version of %s installed on this system (%s) is too recent; you may experience unexpected runtime errors as versions above %s have not been tested.' % (package_name, installed_version, max_version))
        if strict:
            printError('Warning treated as error.')
            return False
    return True
Exemplo n.º 27
0
def main():
    """
    Configure an AWS credentials profile protected by MFA.

    Collects long-lived credentials (from a CSV export, an existing profile,
    or interactive prompts), determines the MFA device serial number, and
    writes the resulting profile to the AWS credentials file.

    :return:                        None on success, 42 on error
    """

    # Parse arguments
    parser = OpinelArgumentParser()
    parser.add_argument('debug')
    parser.add_argument('profile')
    parser.add_argument('csv-credentials')
    parser.add_argument('mfa-serial')
    parser.add_argument('mfa-code')
    parser.parser.add_argument('--role-arn',
                                dest='role_arn',
                                default=None,
                                help='ARN of the assumed role.')
    parser.parser.add_argument('--external-id',
                                dest='external_id',
                                default=None,
                                help='External ID to use when assuming the role.')
    args = parser.parse_args()

    # Configure the debug level
    configPrintException(args.debug)

    # Check version of opinel
    if not check_requirements(os.path.realpath(__file__)):
        return 42

    # Arguments
    # NOTE: args.profile is a list; only the first profile name is used
    profile_name = args.profile[0]

    if args.csv_credentials:
        # Read credentials from a CSV file
        credentials = {}
        credentials['AccessKeyId'], credentials['SecretAccessKey'], credentials['SerialNumber'] = read_creds_from_csv(args.csv_credentials)
        if not credentials['AccessKeyId'] or not credentials['SecretAccessKey']:
            printError('Failed to read credentials from %s' % args.csv_credentials)
            return 42
        use_found_credentials = True
    else:
        # Check for migration from existing profile to no-mfa profile
        use_found_credentials = False
        credentials = read_creds_from_aws_credentials_file(profile_name)
        # Only reuse the profile when it holds long-lived keys (no session
        # token) and no MFA serial is configured yet
        if 'AccessKeyId' in credentials and credentials['AccessKeyId'] != None and credentials['SecretAccessKey'] != None and credentials['SerialNumber'] == None and credentials['SessionToken'] == None:
            if prompt_4_yes_no('Found long-lived credentials for the profile \'%s\'. Do you want to use those when configuring MFA' % profile_name):
               use_found_credentials = True
               iam_client = connect_service('iam', credentials)
               try:
                   # Best effort: auto-discover the MFA serial via the IAM API;
                   # failures fall through to the interactive prompt below
                   printInfo('Trying to read the MFA serial number associated with this IAM user...')
                   user_name = iam_client.get_user()['User']['UserName']
                   mfa_devices = iam_client.list_mfa_devices(UserName = user_name)['MFADevices']
                   credentials['SerialNumber'] = mfa_devices[0]['SerialNumber']
               except Exception as e:
                   printException(e)
                   pass

    if not use_found_credentials:
       # Get values
        credentials['AccessKeyId'] = prompt_4_value('AWS Access Key ID: ', no_confirm = True)
        credentials['SecretAccessKey'] = prompt_4_value('AWS Secret Access Key: ', no_confirm = True)
    if 'SerialNumber' not in credentials or not credentials['SerialNumber']:
        credentials['SerialNumber'] = prompt_4_mfa_serial()

    # Check for overwrite
    # Loop until the user picks a profile name that is either new or that
    # they explicitly agree to overwrite
    while True:
        c = read_creds_from_aws_credentials_file(profile_name)
        if 'AccessKeyId' in c and c['AccessKeyId']:
            if not prompt_4_yes_no('The profile \'%s\' already exists. Do you want to overwrite the existing values' % profile_name):
                if not prompt_4_yes_no('Do you want to create a new profile with these credentials'):
                    printError('Configuration aborted.')
                    return
                profile_name = prompt_4_value('Profile name: ')
            else:
                break
        else:
            break

    # Write values to credentials file
    write_creds_to_aws_credentials_file(profile_name, credentials)

    # Delete CSV file?
    if args.csv_credentials and prompt_4_yes_no('Do you want to delete the CSV file that contains your long-lived credentials?'):
        os.remove(args.csv_credentials)
Exemplo n.º 28
0
 def update_ruleset(self, rules_dir):
     """
     Merge rule definitions found on disk into the ruleset.

     Walks the given rule directories (plus the built-in data/findings
     location), loads every JSON rule file, and records it in
     self.available_rules. Parameterized rules (those with 'args') are
     expanded using the argument values declared in the ruleset.

     :param rules_dir:       list of directories to scan; mutated in place
                             (the built-in location is appended); None is a no-op
     """
     if rules_dir == None:
         return
     self.available_rules = {}
     parameterized_rules = []
     self.services = []
     # Split the ruleset's rules into plain and parameterized ones
     for rule in self.ruleset['rules']:
         rule['filename'] = rule['filename'].replace('rules/', '')
         if not 'args' in rule:
             self.available_rules[rule['filename']] = rule
         else:
             parameterized_rules.append(rule)
     # Add default location
     rules_dir.append(
         os.path.join(os.path.dirname(os.path.realpath(__file__)),
                      'data/findings'))
     for dir in rules_dir:
         rule_filenames = [
             f for f in os.listdir(dir)
             if os.path.isfile(os.path.join(dir, f))
         ]
         for rule_filename in rule_filenames:
             # Rule filenames are conventionally '<service>-<rule>.json'
             self.services.append(rule_filename.split('-')[0].lower())
             printDebug('Loading %s' % rule_filename)
             with open('%s/%s' % (dir, rule_filename), 'rt') as f:
                 rule = json.load(f)
                 if not 'key' in rule and not 'arg_names' in rule:
                     # Non-parameterized rule, save it
                     if rule_filename in self.available_rules:
                         self.available_rules[rule_filename].update(rule)
                     else:
                         self.available_rules[rule_filename] = rule
                         self.available_rules[rule_filename][
                             'enabled'] = False
                         # NOTE(review): 'filename' is only set when 'level'
                         # is missing — this looks like an indentation slip;
                         # confirm whether 'filename' should always be set
                         if 'level' not in self.available_rules[
                                 rule_filename]:
                             self.available_rules[rule_filename][
                                 'level'] = 'danger'
                             self.available_rules[rule_filename][
                                 'filename'] = rule_filename
                 else:
                     # Parameterized rules, find all occurences and save N times
                     parameterized_rule_found = False
                     for prule in parameterized_rules:
                         if prule['filename'] == rule_filename:
                             parameterized_rule_found = True
                             # Substitute argument values everywhere except in
                             # the raw conditions
                             for k in rule:
                                 prule[k] = set_argument_values(
                                     rule[k], prule['args'], convert=True
                                 ) if k != 'conditions' else rule[k]
                             key = prule.pop(
                                 'key'
                             ) if 'key' in prule else prule['filename']
                             args = prule.pop('args')
                             if not 'arg_names' in prule:
                                 printError('No arg names key in %s' %
                                            rule_filename)
                                 continue
                             arg_names = prule.pop('arg_names')
                             if len(args) != len(arg_names):
                                 printError(
                                     'Error: rule %s expects %d arguments but was provided %d.'
                                     % (rule_filename, len(arg_names),
                                        len(args)))
                                 continue
                             # Re-shape args as a list of name/value pairs
                             prule['args'] = []
                             for (arg_name,
                                  arg_value) in zip(arg_names, args):
                                 prule['args'].append({
                                     'arg_name': arg_name,
                                     'arg_value': arg_value
                                 })
                             # NOTE(review): the rule is stored in
                             # available_rules only when 'level' was missing;
                             # rules that define a level are never saved here —
                             # looks like an indentation bug, to be confirmed
                             if 'level' not in prule:
                                 prule['level'] = 'danger'
                                 self.available_rules[key] = prule
                     if not parameterized_rule_found:
                         # Save once with no parameters
                         self.available_rules[rule_filename] = rule
                         self.available_rules[rule_filename][
                             'enabled'] = False
                         # NOTE(review): same pattern as above — 'filename' is
                         # only set when 'level' is missing; verify intent
                         if 'level' not in self.available_rules[
                                 rule_filename]:
                             self.available_rules[rule_filename][
                                 'level'] = 'danger'
                             self.available_rules[rule_filename][
                                 'filename'] = rule_filename
                         args = []
                         # Build empty argument placeholders from arg_names
                         for a in rule['arg_names']:
                             args.append({'arg_name': a, 'arg_value': ''})
                             self.available_rules[rule_filename][
                                 'args'] = args
                         printDebug(
                             'Saving rule without parameter value: %s' %
                             rule_filename)
Exemplo n.º 29
0
    def authenticate(self, key_file=None, user_account=None, service_account=None, azure_cli=None, azure_msi=None,
                     azure_service_principal=None, azure_file_auth=None, azure_user_credentials=None, **kargs):
        """
        Implements authentication for the Azure provider using azure-cli.
        Refer to https://docs.microsoft.com/en-us/python/azure/python-sdk-azure-authenticate?view=azure-python.

        Exactly one azure_* flag selects the authentication mode (CLI session,
        managed service identity, SDK auth file, service principal, or
        user/password). On success self.credentials is populated.

        :return: True on success, False on failure; None when no mode matched
        """

        try:
            if azure_cli:
                cli_credentials, self.aws_account_id = get_azure_cli_credentials()  # TODO: Remove aws_account_id
                self.credentials = AzureCredentials(cli_credentials, self.aws_account_id)
                return True
            elif azure_msi:
                credentials = MSIAuthentication()

                # Get the subscription ID
                subscription_client = SubscriptionClient(credentials)
                try:
                    # Tries to read the subscription list
                    subscription = next(subscription_client.subscriptions.list())
                    self.aws_account_id = subscription.subscription_id
                except StopIteration:
                    # If the VM cannot read subscription list, ask Subscription ID:
                    self.aws_account_id = input('Subscription ID: ')

                self.credentials = AzureCredentials(credentials, self.aws_account_id)
                return True
            elif azure_file_auth:
                with open(azure_file_auth) as f:
                    data = json.loads(f.read())
                    subscription_id = data.get('subscriptionId')
                    tenant_id = data.get('tenantId')
                    client_id = data.get('clientId')
                    client_secret = data.get('clientSecret')

                    self.aws_account_id = tenant_id  # TODO this is for AWS

                    credentials = ServicePrincipalCredentials(
                        client_id=client_id,
                        secret=client_secret,
                        tenant=tenant_id
                    )

                    self.credentials = AzureCredentials(credentials, subscription_id)

                    return True
            elif azure_service_principal:
                subscription_id = input("Subscription ID: ")
                tenant_id = input("Tenant ID: ")
                client_id = input("Client ID: ")
                client_secret = getpass("Client secret: ")

                self.aws_account_id = tenant_id  # TODO this is for AWS

                credentials = ServicePrincipalCredentials(
                    client_id=client_id,
                    secret=client_secret,
                    tenant=tenant_id
                )

                self.credentials = AzureCredentials(credentials, subscription_id)

                return True
            elif azure_user_credentials:
                # NOTE(review): this branch was corrupted in the source (the
                # credential literals were scrubbed); reconstructed from the
                # surrounding code — prompt for username/password and build
                # user-password credentials. Confirm against upstream history.
                username = input("Username: ")
                password = getpass("Password: ")

                credentials = UserPassCredentials(username, password)
                self.aws_account_id = ""  # TODO this is for AWS
                self.credentials = AzureCredentials(credentials, self.aws_account_id)
                return True
        except Exception as e:
            printError('Failed to authenticate to Azure')
            printException(e)
            return False
Exemplo n.º 30
0
    def analyze(self, aws_config):
        """
        Run every loaded rule against the AWS configuration and record the
        results (matched items, counters, and rule metadata) under each
        service's findings/filters section of aws_config.

        :param aws_config:
        """
        printInfo('Analyzing AWS config...')
        # TODO: reset violations for all services in scope (maybe this can be done somewhere else (e.g. loading)
        for finding_path in self.rules:
            for rule in self.rules[finding_path]:
                rule_details = self.rules[finding_path][rule]
                printDebug('Processing %s rule[%s]: "%s"' %
                           (finding_path.split('.')[0], self.rule_type[:-1],
                            rule_details['description']))
                path = finding_path.split('.')
                service = path[0]
                manage_dictionary(aws_config['services'][service],
                                  self.rule_type, {})
                aws_config['services'][service][self.rule_type][rule] = {}
                # Alias the per-rule result dict to avoid repeating the deep
                # subscript chain; mutations through the alias update aws_config
                result = aws_config['services'][service][self.rule_type][rule]
                result['description'] = rule_details['description']
                result['path'] = rule_details['path']
                if self.rule_type == 'findings':
                    result['level'] = rule_details['level']
                if 'id_suffix' in rule_details:
                    result['id_suffix'] = rule_details['id_suffix']
                if 'display_path' in rule_details:
                    result['display_path'] = rule_details['display_path']
                try:
                    result['items'] = recurse(
                        aws_config['services'], aws_config['services'],
                        path, [], rule_details, True)
                    result['dashboard_name'] = rule_details.get(
                        'dashboard_name', '??')
                    result['checked_items'] = rule_details.get(
                        'checked_items', 0)
                    result['flagged_items'] = len(result['items'])
                    result['service'] = service
                    result['rationale'] = rule_details.get('rationale', 'N/A')
                except Exception as e:
                    printError('Failed to process rule defined in %s.json' %
                               rule)
                    # Fallback if process rule failed to ensure report creation and data dump still happen
                    result['checked_items'] = 0
                    result['flagged_items'] = 0
                    printException(e)
Exemplo n.º 31
0
def recurse(all_info,
            current_info,
            target_path,
            current_path,
            config,
            add_suffix=False):
    """
    Recursively walk the configuration tree along target_path and return the
    dotted paths of every leaf item that matches the rule's conditions.

    :param all_info:        full configuration, passed to condition evaluation
    :param current_info:    subtree currently being walked
    :param target_path:     remaining path components to descend ('id' is a wildcard)
    :param current_path:    path components walked so far
    :param config:          rule configuration (conditions, counters updated in place)
    :param add_suffix:      when True, append config['id_suffix'] to flagged paths
    :return:                list of dotted paths of flagged items
    """
    results = []
    if not target_path:
        # Reached a leaf: count it for the dashboard, then test the conditions
        manage_dictionary(config, 'checked_items', 0)
        config['checked_items'] += 1
        if pass_conditions(all_info, current_path,
                           copy.deepcopy(config['conditions'])):
            if add_suffix and 'id_suffix' in config:
                current_path.append(config['id_suffix'])
            results.append('.'.join(current_path))
        # Return the flagged items...
        config['flagged_items'] = len(results)
        return results
    remaining = copy.deepcopy(target_path)
    prefix = copy.deepcopy(current_path)
    attribute = remaining.pop(0)
    if type(current_info) == dict:
        if attribute in current_info:
            results += recurse(all_info, current_info[attribute], remaining,
                               prefix + [attribute], config, add_suffix)
        elif attribute == 'id':
            # 'id' acts as a wildcard: descend into every key of the dict
            for key in current_info:
                results += recurse(all_info, current_info[key],
                                   copy.deepcopy(remaining), prefix + [key],
                                   config, add_suffix)
    # To handle lists properly, I would have to make sure the list is properly ordered and I can use the index to consistently access an object... Investigate (or do not use lists)
    elif type(current_info) == list:
        for index, item in enumerate(current_info):
            results += recurse(all_info, item, copy.deepcopy(remaining),
                               prefix + [str(index)], config, add_suffix)
    else:
        printError('Error: unhandled case, typeof(current_info) = %s' %
                   type(current_info))
        printError(str(current_info))
        raise Exception
    return results
Exemplo n.º 32
0
def delete_user(iam_client,
                user,
                mfa_serial=None,
                keep_user=False,
                terminated_groups=None):
    """
    Delete IAM user

    Removes, in order: access keys, MFA devices, group memberships, login
    profile, inline policies, and managed policy attachments; finally deletes
    the user itself (or moves it to the terminated groups when keep_user is
    True). Each step is best-effort; failures are collected and returned.

    :param iam_client:
    :param user:
    :param mfa_serial:          additional virtual MFA device serial to delete
    :param keep_user:           when True, keep the user and add it to terminated_groups
    :param terminated_groups:   groups to add a kept user to (default: none)
    :return:                    list of AWS error codes encountered
    """
    # Bug fix: the original used a mutable default argument ([]) which is
    # shared across calls; default to a fresh empty list instead
    if terminated_groups is None:
        terminated_groups = []

    def _error_code(exception):
        # Best-effort extraction of the AWS error code; generic exceptions
        # (without a .response dict) previously raised AttributeError here
        response = getattr(exception, 'response', None)
        if isinstance(response, dict):
            return response.get('Error', {}).get('Code', type(exception).__name__)
        return type(exception).__name__

    errors = []
    printInfo('Deleting user %s...' % user)
    # Delete access keys
    try:
        aws_keys = get_access_keys(iam_client, user)
        for aws_key in aws_keys:
            try:
                printInfo(
                    'Deleting access key ID %s... ' % aws_key['AccessKeyId'],
                    False)
                iam_client.delete_access_key(
                    AccessKeyId=aws_key['AccessKeyId'], UserName=user)
                printInfo('Success')
            except Exception as e:
                printInfo('Failed')
                printException(e)
                errors.append(_error_code(e))
    except Exception as e:
        printException(e)
        printError('Failed to get access keys for user %s.' % user)
    # Deactivate and delete MFA devices
    try:
        mfa_devices = iam_client.list_mfa_devices(UserName=user)['MFADevices']
        for mfa_device in mfa_devices:
            serial = mfa_device['SerialNumber']
            try:
                printInfo('Deactivating MFA device %s... ' % serial, False)
                iam_client.deactivate_mfa_device(SerialNumber=serial,
                                                 UserName=user)
                printInfo('Success')
            except Exception as e:
                printInfo('Failed')
                printException(e)
                errors.append(_error_code(e))
            delete_virtual_mfa_device(iam_client, serial)
        if mfa_serial:
            delete_virtual_mfa_device(iam_client, mfa_serial)
    except Exception as e:
        printException(e)
        # Typo fix: 'Faile' -> 'Failed'
        printError(
            'Failed to fetch/delete MFA device serial number for user %s.' %
            user)
        errors.append(_error_code(e))
    # Remove IAM user from groups
    try:
        groups = iam_client.list_groups_for_user(UserName=user)['Groups']
        for group in groups:
            try:
                printInfo('Removing from group %s... ' % group['GroupName'],
                          False)
                iam_client.remove_user_from_group(GroupName=group['GroupName'],
                                                  UserName=user)
                printInfo('Success')
            except Exception as e:
                printInfo('Failed')
                printException(e)
                errors.append(_error_code(e))
    except Exception as e:
        printException(e)
        printError('Failed to fetch IAM groups for user %s.' % user)
        errors.append(_error_code(e))
    # Delete login profile
    login_profile = []
    try:
        login_profile = iam_client.get_login_profile(
            UserName=user)['LoginProfile']
    except Exception as e:
        # No login profile configured for this user; nothing to delete
        pass
    try:
        if len(login_profile):
            printInfo('Deleting login profile... ', False)
            iam_client.delete_login_profile(UserName=user)
            printInfo('Success')
    except Exception as e:
        printInfo('Failed')
        printException(e)
        errors.append(_error_code(e))
    # Delete inline policies
    try:
        printInfo('Deleting inline policies... ', False)
        policies = iam_client.list_user_policies(UserName=user)
        for policy in policies['PolicyNames']:
            iam_client.delete_user_policy(UserName=user, PolicyName=policy)
        printInfo('Success')
    except Exception as e:
        printInfo('Failed')
        printException(e)
        errors.append(_error_code(e))
    # Detach managed policies
    try:
        printInfo('Detaching managed policies... ', False)
        policies = iam_client.list_attached_user_policies(UserName=user)
        for policy in policies['AttachedPolicies']:
            iam_client.detach_user_policy(UserName=user,
                                          PolicyArn=policy['PolicyArn'])
        printInfo('Success')
    except Exception as e:
        printInfo('Failed')
        printException(e)
        errors.append(_error_code(e))
    # Delete IAM user
    try:
        if not keep_user:
            iam_client.delete_user(UserName=user)
            printInfo('User %s deleted.' % user)
        else:
            for group in terminated_groups:
                add_user_to_group(iam_client, group, user)
    except Exception as e:
        printException(e)
        printError('Failed to delete user.')
        errors.append(_error_code(e))
    return errors
Exemplo n.º 33
0
    def set_definition(self,
                       rule_definitions,
                       attributes=None,
                       ip_ranges=None,
                       params=None):
        """
        Update every attribute of the rule by setting the argument values as necessary.

        :param rule_definitions:    Map of filename to rule definition objects; the
                                    entry for self.filename supplies the JSON string
                                    definition of this rule
        :param attributes:          Names of definition attributes to copy onto the
                                    rule (all attributes when empty)
        :param ip_ranges:           IP range filenames used when a condition refers
                                    to the special ip_ranges_from_args value
        :param params:              Replacement values for the special condition
                                    testcases (e.g. AWS account ID)
        :return:                    None
        """
        attributes = [] if attributes is None else attributes
        ip_ranges = [] if ip_ranges is None else ip_ranges
        params = {} if params is None else params
        try:
            string_definition = rule_definitions[
                self.filename].string_definition
            # Load condition dependencies
            definition = json.loads(string_definition)
            definition['conditions'] += self.conditions
            loaded_conditions = []
            for condition in definition['conditions']:
                if condition[0].startswith('_INCLUDE_('):
                    include = re.findall(r'_INCLUDE_\((.*?)\)',
                                         condition[0])[0]
                    rules_path = '%s/%s' % (self.data_path, include)
                    with open(rules_path, 'rt') as f:
                        new_conditions = f.read()
                        # Apply the include's placeholder substitutions before
                        # parsing the included JSON
                        for (i, value) in enumerate(condition[1]):
                            new_conditions = re.sub(condition[1][i],
                                                    condition[2][i],
                                                    new_conditions)
                        new_conditions = json.loads(
                            new_conditions)['conditions']
                    loaded_conditions.append(new_conditions)
                else:
                    loaded_conditions.append(condition)
            definition['conditions'] = loaded_conditions
            string_definition = json.dumps(definition)
            # Set parameters: fill _ARG_n_ placeholders from self.args
            parameters = re.findall(r'(_ARG_([a-zA-Z0-9]+)_)',
                                    string_definition)
            for param in parameters:
                index = int(param[1])
                if len(self.args) <= index:
                    string_definition = string_definition.replace(param[0], '')
                elif type(self.args[index]) == list:
                    value = '[ %s ]' % ', '.join('"%s"' % v
                                                 for v in self.args[index])
                    string_definition = string_definition.replace(
                        '"%s"' % param[0], value)
                else:
                    string_definition = string_definition.replace(
                        param[0], self.args[index])
            # Strip dots if necessary
            stripdots = re_strip_dots.findall(string_definition)
            for value in stripdots:
                string_definition = string_definition.replace(
                    value[0], value[1].replace('.', ''))
            definition = json.loads(string_definition)
            # Set special values (IP ranges, AWS account ID, ...)
            for condition in definition['conditions']:
                if type(condition) != list or len(condition) == 1 or type(
                        condition[2]) == list:
                    continue
                for testcase in testcases:
                    result = testcase['regex'].match(condition[2])
                    if result and (testcase['name'] == 'ip_ranges_from_file'
                                   or testcase['name']
                                   == 'ip_ranges_from_local_file'):
                        filename = result.groups()[0]
                        conditions = result.groups()[1] if len(
                            result.groups()) > 1 else []
                        # TODO :: handle comma here...
                        if filename == ip_ranges_from_args:
                            prefixes = []
                            for filename in ip_ranges:
                                prefixes += read_ip_ranges(
                                    filename,
                                    local_file=True,
                                    ip_only=True,
                                    conditions=conditions)
                            condition[2] = prefixes
                        else:
                            local_file = True if testcase[
                                'name'] == 'ip_ranges_from_local_file' else False
                            condition[2] = read_ip_ranges(
                                filename,
                                local_file=local_file,
                                ip_only=True,
                                conditions=conditions)
                        # Both branches above are done with this condition
                        # (previously each branch had its own break plus an
                        # unreachable trailing break)
                        break
                    elif result:
                        condition[2] = params[testcase['name']]
                        break

            if len(attributes) == 0:
                attributes = [attr for attr in definition]
            for attr in attributes:
                if attr in definition:
                    setattr(self, attr, definition[attr])
            if hasattr(self, 'path'):
                self.service = format_service_name(self.path.split('.')[0])
            if not hasattr(self, 'key'):
                setattr(self, 'key', self.filename)
            setattr(self, 'key', self.key.replace('.json', ''))
            if self.key_suffix:
                setattr(self, 'key', '%s-%s' % (self.key, self.key_suffix))
        except Exception as e:
            # printException(e)
            printError('Failed to set definition %s: %s' % (self.filename, e))
Exemplo n.º 34
0
    def test_all_finding_rules(self):
        """
        Regression test: run each rule of the default AWS ruleset against its
        recorded test configuration (data/rule-configs/) and compare the
        reported findings with the expected results (data/rule-results/).
        """
        test_dir = os.path.dirname(os.path.realpath(__file__))
        test_ruleset_file_name = os.path.join(test_dir, 'data/ruleset-test.json')

        #FIXME this is only for AWS
        with open(os.path.join(test_dir, '../ScoutSuite/providers/aws/rules/rulesets/default.json'), 'rt') as f:
            ruleset = json.load(f)

        rule_counters = {'found': 0, 'tested': 0, 'verified': 0}
        for file_name in ruleset['rules']:
            rule_counters['found'] += 1
            test_config_file_name = os.path.join(test_dir, 'data/rule-configs/%s' % file_name)
            if not os.path.isfile(test_config_file_name):
                continue
            rule_counters['tested'] += 1
            # Build a one-rule ruleset so the engine only evaluates this rule
            test_ruleset = {'rules': {}, 'about': 'regression test'}
            test_ruleset['rules'][file_name] = []
            rule = ruleset['rules'][file_name][0]
            rule['enabled'] = True
            test_ruleset['rules'][file_name].append(rule)
            with open(test_ruleset_file_name, 'wt') as f:
                f.write(json.dumps(test_ruleset, indent=4))
            rules = Ruleset(filename=test_ruleset_file_name)
            pe = ProcessingEngine(rules)
            # Load the recorded provider configuration onto a dummy provider object
            with open(test_config_file_name, 'rt') as f:
                dummy_provider = DummyObject()
                test_config_dict = json.load(f)
                for key in test_config_dict:
                    setattr(dummy_provider, key, test_config_dict[key])
            pe.run(dummy_provider)
            service = file_name.split('-')[0]
            findings = dummy_provider.services[service]['findings']
            findings = findings[list(findings.keys())[0]]['items']
            test_result_file_name = os.path.join(test_dir, 'data/rule-results/%s' % file_name)
            if not os.path.isfile(test_result_file_name):
                printError('Expected findings:: ')
                printError(json.dumps(findings, indent=4))
                continue
            rule_counters['verified'] += 1
            with open(test_result_file_name, 'rt') as f:
                items = json.load(f)
            try:
                # set equality is order-insensitive; the previous sorted() before
                # set() was a no-op
                assert (set(findings) == set(items))
            except Exception as e:
                printError('Expected items:\n %s' % json.dumps(sorted(items)))
                printError('Reported items:\n %s' % json.dumps(sorted(findings)))
                assert (False)
        printError('Existing  rules: %d' % rule_counters['found'])
        printError('Processed rules: %d' % rule_counters['tested'])
        printError('Verified  rules: %d' % rule_counters['verified'])
Exemplo n.º 35
0
    def _get_projects(self, parent_type, parent_id):
        """
        Returns all the projects in a given organization or folder. For a project_id it only returns the project
        details.

        :param parent_type: one of 'project', 'organization', 'folder' or 'all'
        :param parent_id:   identifier of the parent resource to enumerate
        :return:            list of ACTIVE project resource dicts (None for an
                            unsupported parent_type; partial results on API error
                            since the finally clause always returns)
        """

        if parent_type not in ['project', 'organization', 'folder', 'all']:
            return None

        projects = []

        # FIXME can't currently be done with API client library as it consumes v1 which doesn't support folders
        """

        resource_manager_client = resource_manager.Client(credentials=self.credentials)

        project_list = resource_manager_client.list_projects()

        for p in project_list:
            if p.parent['id'] == self.organization_id and p.status == 'ACTIVE':
                projects.append(p.project_id)
        """

        resource_manager_client_v1 = gcp_connect_service(
            service='cloudresourcemanager', credentials=self.credentials)
        # v2 client is required for folder listing (not supported by v1)
        resource_manager_client_v2 = gcp_connect_service(
            service='cloudresourcemanager-v2', credentials=self.credentials)

        try:
            if parent_type == 'project':
                project_response = resource_manager_client_v1.projects().list(
                    filter='id:%s' % parent_id).execute()
                if 'projects' in project_response.keys():
                    for project in project_response['projects']:
                        if project['lifecycleState'] == "ACTIVE":
                            projects.append(project)

            elif parent_type == 'all':
                project_response = resource_manager_client_v1.projects().list(
                ).execute()
                if 'projects' in project_response.keys():
                    for project in project_response['projects']:
                        if project['lifecycleState'] == "ACTIVE":
                            projects.append(project)
            else:

                # get parent children projects
                request = resource_manager_client_v1.projects().list(
                    filter='parent.id:%s' % parent_id)
                while request is not None:
                    response = request.execute()

                    if 'projects' in response.keys():
                        for project in response['projects']:
                            if project['lifecycleState'] == "ACTIVE":
                                projects.append(project)

                    request = resource_manager_client_v1.projects().list_next(
                        previous_request=request, previous_response=response)

                # get parent children projects in children folders recursively
                folder_response = resource_manager_client_v2.folders().list(
                    parent='%ss/%s' % (parent_type, parent_id)).execute()
                if 'folders' in folder_response.keys():
                    for folder in folder_response['folders']:
                        # Folder names look like 'folders/<id>'; slice off the
                        # literal prefix. str.strip('folders/') was wrong: it
                        # strips a *character set* from both ends, not a prefix
                        projects.extend(
                            self._get_projects(
                                "folder",
                                folder['name'][len('folders/'):]))

            printInfo("Found {} project(s) to scan.".format(len(projects)))

        except Exception as e:
            printError('Unable to list accessible Projects')
            printException(e)

        finally:
            # Deliberate best-effort: always return what was collected so far
            return projects
Exemplo n.º 36
0
def match_security_groups_and_resources_callback(aws_config, current_config, path, current_path, resource_id, callback_args):
    """
    Map a resource to the security groups it uses and record the usage under
    each security group's 'used_by' attribute.

    :param aws_config:      Full configuration object for all services
    :param current_config:  Configuration at the current recursion point (unused here)
    :param path:            Rule path (unused here)
    :param current_path:    Path to the resource being processed
    :param resource_id:     Identifier of the resource being processed
    :param callback_args:   Rule arguments; expects 'sg_list_attribute_name' and
                            'sg_id_attribute_name', optionally 'resource_id_path'
                            and 'status_path'
    """
    service = current_path[1]
    original_resource_path = combine_paths(copy.deepcopy(current_path), [ resource_id ])
    resource = get_object_at(aws_config, original_resource_path)
    if 'resource_id_path' not in callback_args:
        # The resource lives directly at current_path + resource_id
        resource_type = current_path[-1]
        resource_path = copy.deepcopy(current_path)
        resource_path.append(resource_id)
    else:
        # The real resource id/type are found deeper, via the configured sub-path
        resource_path = combine_paths(copy.deepcopy(current_path), callback_args['resource_id_path'])
        resource_id = resource_path[-1]
        resource_type = resource_path[-2]
    if 'status_path' in callback_args:
        status_path = combine_paths(copy.deepcopy(original_resource_path), callback_args['status_path'])
        resource_status = get_object_at(aws_config, status_path).replace('.', '_')
    else:
        resource_status = None
    # Resources not nested under 'vpcs' need their VPC resolved via sg_map
    unknown_vpc_id = True if current_path[4] != 'vpcs' else False
    # Issue 89 & 91 : can instances have no security group?
    try:
        try:
            sg_attribute = get_object_at(resource, callback_args['sg_list_attribute_name'])
        except Exception:
            # Was a bare except (caught BaseException too); resource has no
            # security group attribute, so there is nothing to record
            return
        if type(sg_attribute) != list:
            sg_attribute = [ sg_attribute ]
        for resource_sg in sg_attribute:
            if type(resource_sg) == dict:
                sg_id = resource_sg[callback_args['sg_id_attribute_name']]
            else:
                sg_id = resource_sg
            if unknown_vpc_id:
                vpc_id = sg_map[sg_id]['vpc_id']
                sg_base_path = copy.deepcopy(current_path[0:4])
                sg_base_path[1] = 'ec2'
                sg_base_path = sg_base_path + [ 'vpcs', vpc_id, 'security_groups' ]
            else:
                sg_base_path = copy.deepcopy(current_path[0:6])
                sg_base_path[1] = 'ec2'
                sg_base_path.append('security_groups')
            sg_path = copy.deepcopy(sg_base_path)
            sg_path.append(sg_id)
            sg = get_object_at(aws_config, sg_path)
            # Add usage information
            manage_dictionary(sg, 'used_by', {})
            manage_dictionary(sg['used_by'], service, {})
            manage_dictionary(sg['used_by'][service], 'resource_type', {})
            manage_dictionary(sg['used_by'][service]['resource_type'], resource_type, {} if resource_status else [])
            if resource_status:
                manage_dictionary(sg['used_by'][service]['resource_type'][resource_type], resource_status, [])
                if not resource_id in sg['used_by'][service]['resource_type'][resource_type][resource_status]:
                    sg['used_by'][service]['resource_type'][resource_type][resource_status].append(resource_id)
            else:
                sg['used_by'][service]['resource_type'][resource_type].append(resource_id)
    except Exception as e:
        if resource_type in ['elbs', 'functions']:
            pass
        else:
            region = current_path[3]
            vpc_id = current_path[5]
            if vpc_id == ec2_classic:
                pass
            else:
                printError('Failed to parse %s in %s in %s' % (resource_type, vpc_id, region))
                printException(e)
Exemplo n.º 37
0
    def authenticate(self, key_file=None, user_account=None, service_account=None, **kargs):
        """
        Implement authentication for the GCP provider
        Refer to https://google-auth.readthedocs.io/en/stable/reference/google.auth.html.

        :param key_file:        Path to a service account key file (used when
                                service_account is set)
        :param user_account:    Truthy when authenticating with end-user credentials
        :param service_account: Truthy when authenticating with a service account
        :return:                True on success, False otherwise
        """

        if user_account:
            # disable GCP warning about using User Accounts
            warnings.filterwarnings("ignore", "Your application has authenticated using end user credentials")
            pass  # Nothing more to do
        elif service_account:
            client_secrets_path = os.path.abspath(key_file)  # TODO this is probably wrong
            os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = client_secrets_path
        else:
            printError('Failed to authenticate to GCP - no supported account type')
            return False

        try:

            self.credentials, project_id = google.auth.default()
            if self.credentials:

                if self.project_id:
                    # service_account credentials with project_id will follow this path
                    self.projects = self._get_projects(parent_type='project',
                                                       parent_id=self.project_id)
                    self.aws_account_id = self.project_id # FIXME this is for AWS
                    self.profile = self.project_id # FIXME this is for AWS

                elif self.organization_id:
                    self.projects = self._get_projects(parent_type='organization',
                                                       parent_id=self.organization_id)
                    self.aws_account_id = self.organization_id # FIXME this is for AWS
                    self.profile = self.organization_id # FIXME this is for AWS

                elif self.folder_id:
                    self.projects = self._get_projects(parent_type='folder',
                                                       parent_id=self.folder_id)
                    self.aws_account_id = self.folder_id # FIXME this is for AWS
                    self.profile = self.folder_id # FIXME this is for AWS

                elif service_account: # We know that project_id hasn't been provided and that we have a service account
                    # NOTE(review): _get_projects() does not accept 'service-account'
                    # as a parent_type and will return None here — confirm intended
                    self.projects = self._get_projects(parent_type='service-account',
                                                       parent_id=self.project_id)
                    self.aws_account_id = self.credentials.service_account_email # FIXME this is for AWS
                    self.profile = self.credentials.service_account_email # FIXME this is for AWS

                else:
                    # FIXME this will fail if no default project is set in gcloud config. This is caused because html.py is looking for a profile to build the report
                    self.project_id = project_id
                    self.projects = self._get_projects(parent_type='project',
                                                       parent_id=self.project_id)
                    self.aws_account_id = self.project_id # FIXME this is for AWS
                    self.profile = self.project_id # FIXME this is for AWS

                # TODO this shouldn't be done here? but it has to in order to init with projects...
                self.services.set_projects(projects=self.projects)

                return True
            else:
                return False

        except (google.auth.exceptions.DefaultCredentialsError,
                googleapiclient.errors.HttpError) as e:
            # Both failure modes were previously handled by two duplicated
            # except blocks; merged into a single clause
            printError('Failed to authenticate to GCP')
            printException(e)
            return False
Exemplo n.º 38
0
    def run(self, cloud_provider, skip_dashboard=False):
        """
        Evaluate every enabled rule of the ruleset against the provider's data
        and store the results under each service's rule_type section.

        :param cloud_provider:  Provider object whose .services dict is read and
                                annotated in place
        :param skip_dashboard:  When True, only 'items' is recorded per rule
                                (dashboard metadata is skipped)
        """
        # Clean up existing findings
        for service in cloud_provider.services:
            cloud_provider.services[service][self.ruleset.rule_type] = {}

        # Process each rule
        for finding_path in self._filter_rules(self.rules,
                                               cloud_provider.service_list):
            for rule in self.rules[finding_path]:

                if not rule.enabled:  # or rule.service not in []: # TODO: handle this...
                    continue

                printDebug('Processing %s rule[%s]: "%s"' %
                           (rule.service, rule.filename, rule.description))
                path = rule.path.split('.')
                service = path[0]
                manage_dictionary(cloud_provider.services[service],
                                  self.ruleset.rule_type, {})
                # Alias the deeply-nested result dict instead of repeating the
                # full cloud_provider.services[...][...][...] path on every line
                cloud_provider.services[service][self.ruleset.rule_type][
                    rule.key] = {}
                finding = cloud_provider.services[service][
                    self.ruleset.rule_type][rule.key]
                finding['description'] = rule.description
                finding['path'] = rule.path
                for attr in ['level', 'id_suffix', 'display_path']:
                    if hasattr(rule, attr):
                        finding[attr] = getattr(rule, attr)
                try:
                    setattr(rule, 'checked_items', 0)
                    finding['items'] = recurse(cloud_provider.services,
                                               cloud_provider.services,
                                               path, [], rule, True)
                    if skip_dashboard:
                        continue
                    finding['dashboard_name'] = rule.dashboard_name
                    finding['checked_items'] = rule.checked_items
                    finding['flagged_items'] = len(finding['items'])
                    finding['service'] = rule.service
                    finding['rationale'] = rule.rationale if hasattr(
                        rule, 'rationale') else 'No description available.'
                except Exception as e:
                    printException(e)
                    printError('Failed to process rule defined in %s' %
                               rule.filename)
                    # Fallback if process rule failed to ensure report creation and data dump still happen
                    finding['checked_items'] = 0
                    finding['flagged_items'] = 0
Exemplo n.º 39
0
def match_security_groups_and_resources_callback(aws_config, current_config,
                                                 path, current_path,
                                                 resource_id, callback_args):
    """
    Map a resource to the security groups it uses and record the usage under
    each security group's 'used_by' attribute.

    :param aws_config:      Full configuration object for all services
    :param current_config:  Configuration at the current recursion point (unused here)
    :param path:            Rule path (unused here)
    :param current_path:    Path to the resource being processed
    :param resource_id:     Identifier of the resource being processed
    :param callback_args:   Rule arguments; expects 'sg_list_attribute_name' and
                            'sg_id_attribute_name', optionally 'resource_id_path'
                            and 'status_path'
    """
    service = current_path[1]
    original_resource_path = combine_paths(copy.deepcopy(current_path),
                                           [resource_id])
    resource = get_object_at(aws_config, original_resource_path)
    # Without a configured sub-path, the resource sits at current_path + resource_id
    if not 'resource_id_path' in callback_args:
        resource_type = current_path[-1]
        resource_path = copy.deepcopy(current_path)
        resource_path.append(resource_id)
    else:
        # Otherwise the real id/type are found deeper, via the configured sub-path
        resource_path = combine_paths(copy.deepcopy(current_path),
                                      callback_args['resource_id_path'])
        resource_id = resource_path[-1]
        resource_type = resource_path[-2]
    #print('Resource path: %s' % resource_path)
    #print('Resource type: %s' % resource_type)
    #print('Resource id: %s' % resource_id)
    if 'status_path' in callback_args:
        status_path = combine_paths(copy.deepcopy(original_resource_path),
                                    callback_args['status_path'])
        #print('Status path: %s' % status_path)
        resource_status = get_object_at(aws_config, status_path)
    else:
        resource_status = None
    # Security groups are stored under the resource's region/VPC in the ec2 service
    sg_base_path = copy.deepcopy(current_path[0:6])
    sg_base_path[1] = 'ec2'
    sg_base_path.append('security_groups')
    # Issue 89 & 91 : can instances have no security group?
    try:
        for resource_sg in resource[callback_args['sg_list_attribute_name']]:
            sg_id = resource_sg[callback_args['sg_id_attribute_name']]
            sg_path = copy.deepcopy(sg_base_path)
            sg_path.append(sg_id)
            sg = get_object_at(aws_config, sg_path)
            # Add usage information
            manage_dictionary(sg, 'used_by', {})
            manage_dictionary(sg['used_by'], service, {})
            manage_dictionary(sg['used_by'][service], 'resource_type', {})
            # Per-status lists when a status is tracked, a flat list otherwise
            manage_dictionary(sg['used_by'][service]['resource_type'],
                              resource_type, {} if resource_status else [])
            if resource_status:
                manage_dictionary(
                    sg['used_by'][service]['resource_type'][resource_type],
                    resource_status, [])
                if not resource_id in sg['used_by'][service]['resource_type'][
                        resource_type][resource_status]:
                    sg['used_by'][service]['resource_type'][resource_type][
                        resource_status].append(resource_id)
            else:
                sg['used_by'][service]['resource_type'][resource_type].append(
                    resource_id)
    except Exception as e:
        region = current_path[3]
        vpc_id = current_path[5]
        # EC2-classic ELBs legitimately have no VPC; stay silent for those
        if vpc_id == ec2_classic and resource_type == 'elbs':
            pass
        else:
            printError('Failed to parse %s in %s in %s' %
                       (resource_type, vpc_id, region))
            printException(e)
Exemplo n.º 40
0
def main():
    """
    Entry point for the listall CLI: load a Scout report for each profile,
    evaluate a single rule (from a config file or a resource path) against it
    and print the matching resources.

    :return: 42 when the opinel requirements check fails, None otherwise
    """

    # Parse arguments
    parser = ListallArgumentParser()
    args = parser.parse_args()

    # Configure the debug level
    configPrintException(args.debug)

    # Check version of opinel
    if not check_requirements(os.path.realpath(__file__)):
        return 42

    # Support multiple environments
    for profile_name in args.profile:

        # Load the config
        try:
            report = Scout2Report(args.provider, profile_name, args.report_dir,
                                  args.timestamp)
            aws_config = report.jsrw.load_from_file(AWSCONFIG)
            services = aws_config['service_list']
        except Exception as e:
            printException(e)
            printError(
                'Error, failed to load the configuration for profile %s' %
                profile_name)
            continue

        # Create a ruleset with only whatever rules were specified...
        if args.config:
            rule_filename = args.config
            ruleset = TmpRuleset(rule_dirs=[os.getcwd()],
                                 rule_filename=args.config,
                                 rule_args=args.config_args)
        elif len(args.path) > 0:
            # Create a local tmp rule
            rule_dict = {'description': 'artifact'}
            rule_dict['path'] = args.path[0]
            rule_dict['conditions'] = []
            rule_filename = 'listall-artifact.json'
            with open(os.path.join(os.getcwd(), rule_filename), 'wt') as f:
                f.write(json.dumps(rule_dict))
            ruleset = TmpRuleset(rule_dirs=[os.getcwd()],
                                 rule_filename=rule_filename,
                                 rule_args=[])
        else:
            printError(
                'Error, you must provide either a rule configuration file or the path to the resources targeted.'
            )
            continue

        # Process the rule
        pe = ProcessingEngine(ruleset)
        pe.run(aws_config, skip_dashboard=True)

        # Retrieve items
        rule = ruleset.rules[rule_filename][0]
        rule_service = rule.service.lower()
        rule_key = rule.key
        rule_type = rule.rule_type
        resources = aws_config['services'][rule_service][rule_type][rule_key][
            'items']

        # Set the keys to output
        if len(args.keys):
            # 1. Explicitly provided on the CLI
            rule.keys = args.keys
        elif len(args.keys_file):
            # 2. Explicitly provided files that contain the list of keys
            rule.keys = []
            for filename in args.keys_file:
                with open(filename, 'rt') as f:
                    rule.keys += json.load(f)['keys']
        else:
            try:
                # 3. Load default set of keys based on path
                target_path = rule.display_path if hasattr(
                    rule, 'display_path') else rule.path
                listall_configs_dir = os.path.join(
                    os.path.dirname(os.path.realpath(__file__)),
                    'output/data/listall-configs')
                target_file = os.path.join(listall_configs_dir,
                                           '%s.json' % target_path)
                if os.path.isfile(target_file):
                    with open(target_file, 'rt') as f:
                        rule.keys = json.load(f)['keys']
            except Exception:
                # 4. Print the object name
                # (was a bare except, which also swallowed KeyboardInterrupt)
                rule.keys = ['name']

        # Prepare the output format
        (lines, template) = format_listall_output(args.format_file[0], None,
                                                  args.format, rule)

        # Print the output
        printInfo(
            generate_listall_output(lines, resources, aws_config, template,
                                    []))
Exemplo n.º 41
0
def get_value_at(all_info, current_path, key, to_string=False):
    """
    Get value located at a given path.

    :param all_info:        All of the services' data
    :param current_path:    The value of the `path` variable defined in the finding file
    :param key:             The key that is being requested
    :param to_string:       Whether or not the returned value should be casted as a string
    :return:                The value in `all_info` indicated by the `key` in `current_path`
    """
    segments = key.split('.')
    if segments[-1] == 'id':
        # The requested key is a path component: read it from current_path
        value = current_path[len(segments) - 1]
    else:
        # Build the lookup path for every other form of key
        if key == 'this':
            lookup_path = current_path
        elif '.' in key:
            lookup_path = []
            for position, segment in enumerate(segments):
                if segment == 'id':
                    # 'id' segments are replaced by the matching path component
                    lookup_path.append(current_path[position])
                elif segment == '' and position < len(
                        current_path) and current_path[position].isdigit():
                    # Empty segment over a numeric component: keep it as an index
                    lookup_path.append(int(current_path[position]))
                else:
                    lookup_path.append(segment)
            if len(segments) > len(current_path):
                lookup_path += segments[len(lookup_path):]
        else:
            lookup_path = copy.deepcopy(current_path)
            lookup_path.append(key)
        # Walk the structure one component at a time
        value = all_info
        for component in lookup_path:
            try:
                if type(value) == list and type(value[0]) == dict:
                    value = value[int(component)]
                # TODO ensure this additional condition didn't break anything
                elif type(value) == list and type(component) == int:
                    value = value[component]
                elif type(value) == list:
                    value = component
                elif component == '':
                    pass
                else:
                    try:
                        value = value[component]
                    except Exception as e:
                        printError('Current path: %s' % str(current_path))
                        printException(e)
                        raise Exception
            except Exception as e:
                printError('Current path: %s' % str(current_path))
                printException(e)
                raise Exception
    return str(value) if to_string else value
Exemplo n.º 42
0
    def test_all_finding_rules(self):
        """
        Regression test: run each rule of the default ruleset against its
        recorded test configuration (data/rule-configs/) and compare the
        reported findings with the expected results (data/rule-results/).
        """
        test_dir = os.path.dirname(os.path.realpath(__file__))
        test_ruleset_file_name = os.path.join(test_dir, 'data/ruleset-test.json')
        with open(os.path.join(test_dir, '../AWSScout2/rules/data/rulesets/default.json'), 'rt') as f:
            ruleset = json.load(f)
        rule_counters = {'found': 0, 'tested': 0, 'verified': 0}
        for file_name in ruleset['rules']:
            rule_counters['found'] += 1
            # Skip rules that have no recorded test configuration
            test_config_file_name = os.path.join(test_dir, 'data/rule-configs/%s' % file_name)
            if not os.path.isfile(test_config_file_name):
                continue
            rule_counters['tested'] += 1
            # Build a one-rule ruleset so the engine only evaluates this rule
            test_ruleset = {'rules': {}, 'about': 'regression test'}
            test_ruleset['rules'][file_name] = []
            rule = ruleset['rules'][file_name][0]
            rule['enabled'] = True
            test_ruleset['rules'][file_name].append(rule)
            with open(test_ruleset_file_name, 'wt') as f:
                f.write(json.dumps(test_ruleset, indent = 4))
#            printError('Ruleset ::')
#            printError(str(test_ruleset))
            rules = Ruleset(filename = test_ruleset_file_name)
            pe = ProcessingEngine(rules)
            with open(test_config_file_name, 'rt') as f:
                aws_config = json.load(f)
            pe.run(aws_config)
            # Extract the items flagged by the (single) rule that was run
            service = file_name.split('-')[0]
            findings = aws_config['services'][service]['findings']
            findings = findings[list(findings.keys())[0]]['items']
            test_result_file_name = os.path.join(test_dir, 'data/rule-results/%s' % file_name)
            if not os.path.isfile(test_result_file_name):
                printError('Expected findings:: ')
                printError(json.dumps(findings, indent = 4))
                continue
            rule_counters['verified'] += 1
            with open(test_result_file_name, 'rt') as f:
                items = json.load(f)
            try:
                assert(set(sorted(findings)) == set(sorted(items)))
            except Exception as e:
                # Dump both sides before failing so the mismatch is visible
                printError('Expected items:\n %s' % json.dumps(sorted(items)))
                printError('Reported items:\n %s' % json.dumps(sorted(findings)))
                assert(False)
        printError('Existing  rules: %d' % rule_counters['found'])
        printError('Processed rules: %d' % rule_counters['tested'])
        printError('Verified  rules: %d' % rule_counters['verified'])
Exemplo n.º 43
0
def main():
    """
    Entry point for the 'listall' utility: for each requested profile, load a
    previously generated Scout2 report, run a single rule against it, and
    print the matching resources in the requested output format.

    :return:    42 on error (failed requirements check), None otherwise
    """

    # Parse arguments
    parser = ListallArgumentParser()
    args = parser.parse_args()

    # Configure the debug level
    configPrintException(args.debug)

    # Check version of opinel
    if not check_requirements(os.path.realpath(__file__)):
        return 42

    # Support multiple environments
    for profile_name in args.profile:

        # Load the config previously saved by Scout2 for this profile
        try:
            report = Scout2Report(profile_name, args.report_dir, args.timestamp)
            aws_config = report.jsrw.load_from_file(AWSCONFIG)
            services = aws_config['service_list']
        except Exception as e:
            printException(e)
            printError('Error, failed to load the configuration for profile %s' % profile_name)
            continue


        # Create a ruleset with only whatever rules were specified...
        if args.config:
            rule_filename = args.config
            ruleset = TmpRuleset(rule_dirs = [os.getcwd()], rule_filename = args.config, rule_args = args.config_args)
        elif len(args.path) > 0:
            # Create a local tmp rule with no conditions: it matches every
            # object found at the given path
            rule_dict = {'description': 'artifact'}
            rule_dict['path'] = args.path[0]
            rule_dict['conditions'] = []
            rule_filename = 'listall-artifact.json'
            with open(os.path.join(os.getcwd(), rule_filename), 'wt') as f:
                f.write(json.dumps(rule_dict))
            ruleset = TmpRuleset(rule_dirs = [os.getcwd()], rule_filename = rule_filename, rule_args = [])
        else:
            printError('Error, you must provide either a rule configuration file or the path to the resources targeted.')
            continue


        # Process the rule (no dashboard metadata needed for a plain listing)
        pe = ProcessingEngine(ruleset)
        pe.run(aws_config, skip_dashboard = True)

        # Retrieve the items that the rule flagged
        rule = ruleset.rules[rule_filename][0]
        rule_service = rule.service.lower()
        rule_key = rule.key
        rule_type = rule.rule_type
        resources = aws_config['services'][rule_service][rule_type][rule_key]['items']

        # Set the keys to output
        if len(args.keys):
            # 1. Explicitly provided on the CLI
            rule.keys = args.keys
        elif len(args.keys_file):
            # 2. Explicitly provided files that contain the list of keys
            rule.keys = []
            for filename in args.keys_file:
                with open(filename, 'rt') as f:
                    rule.keys += json.load(f)['keys']
        else:
            try:
                # 3. Load default set of keys based on path
                target_path = rule.display_path if hasattr(rule, 'display_path') else rule.path
                listall_configs_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'output/data/listall-configs')
                target_file = os.path.join(listall_configs_dir, '%s.json' % target_path)
                if os.path.isfile(target_file):
                    with open(target_file, 'rt') as f:
                        rule.keys = json.load(f)['keys']
            except:
                # 4. Print the object name
                rule.keys = ['name']

        # Prepare the output format
        # NOTE(review): args.format_file[0] assumes at least one entry;
        # presumably the argument parser supplies a default - verify in
        # ListallArgumentParser.
        (lines, template) = format_listall_output(args.format_file[0], None, args.format, rule)

        # Print the output
        printInfo(generate_listall_output(lines, resources, aws_config, template, []))
Exemplo n.º 44
0
def read_creds(profile_name,
               csv_file=None,
               mfa_serial_arg=None,
               mfa_code=None,
               force_init=False,
               role_session_name='opinel'):
    """
    Read credentials from anywhere (CSV, Environment, Instance metadata, config/credentials)

    :param profile_name:        Name of the profile to load credentials for
    :param csv_file:            Path to a CSV file containing the credentials
    :param mfa_serial_arg:      MFA device serial number passed on the CLI
    :param mfa_code:            MFA token code passed on the CLI
    :param force_init:          When True, always initialize a new STS session
    :param role_session_name:   Session name to use when assuming a role

    :return:                    Credentials dictionary; 'AccessKeyId' is None
                                when no credentials could be found
    """
    first_sts_session = False
    expiration = None
    # Initialized up front so the expiration comparison below can never hit
    # an unbound local if the cached-credentials read fails part-way through
    current = datetime.datetime.utcnow()
    # Fallback credentials used when re-reading the '-nomfa' profile fails;
    # previously this could be referenced before assignment (NameError) when
    # the saved STS credentials had expired
    sts_credentials = None
    credentials = init_creds()
    if csv_file:
        # Read credentials from a CSV file that was provided
        credentials['AccessKeyId'], credentials[
            'SecretAccessKey'], credentials[
                'SerialNumber'] = read_creds_from_csv(csv_file)
    elif profile_name == 'default':
        # Try reading credentials from environment variables (Issue #11) if the profile name is 'default'
        credentials = read_creds_from_environment_variables()
    if ('AccessKeyId' not in credentials or not credentials['AccessKeyId']
        ) and not csv_file and profile_name == 'default':
        # Fall back to the EC2 instance metadata service
        credentials = read_creds_from_ec2_instance_metadata()
    if not credentials['AccessKeyId'] and not csv_file:
        # Lookup if a role is defined in ~/.aws/config
        role_arn, source_profile, role_mfa_serial = read_profile_from_aws_config_file(
            profile_name)
        if role_arn and source_profile:
            # Lookup cached credentials
            try:
                cached_credentials_filename = get_cached_credentials_filename(
                    profile_name, role_arn)
                with open(cached_credentials_filename, 'rt') as f:
                    assume_role_data = json.load(f)
                    credentials = assume_role_data['Credentials']
                    expiration = dateutil.parser.parse(
                        credentials['Expiration'])
                    expiration = expiration.replace(tzinfo=None)
                    current = datetime.datetime.utcnow()
                    if expiration < current:
                        print('Role\'s credentials have expired on %s' %
                              credentials['Expiration'])
            except Exception:
                # Missing/corrupt cache file: fall through and re-assume the role
                pass
            if not expiration or expiration < current or credentials[
                    'AccessKeyId'] is None:
                credentials = read_creds(source_profile)
                if role_mfa_serial:
                    credentials['SerialNumber'] = role_mfa_serial
                    # Auto prompt for a code...
                    if not mfa_code:
                        credentials['TokenCode'] = prompt_4_mfa_code()
                credentials = assume_role(profile_name, credentials, role_arn,
                                          role_session_name)
        # Read from ~/.aws/credentials
        else:
            credentials = read_creds_from_aws_credentials_file(profile_name)
            if credentials['SessionToken']:
                # Keep the existing STS credentials around in case the
                # re-initialization below cannot find long-term credentials
                sts_credentials = credentials
                if 'Expiration' in credentials and credentials['Expiration']:
                    expiration = dateutil.parser.parse(
                        credentials['Expiration'])
                    expiration = expiration.replace(tzinfo=None)
                    current = datetime.datetime.utcnow()
                    if expiration < current:
                        printInfo('Saved STS credentials expired on %s' %
                                  credentials['Expiration'])
                        force_init = True
                else:
                    # No expiration recorded: cannot tell whether it's still valid
                    force_init = True
            else:
                first_sts_session = True
            if force_init or (mfa_serial_arg and mfa_code):
                credentials = read_creds_from_aws_credentials_file(
                    profile_name if first_sts_session else '%s-nomfa' %
                    profile_name)
                if not credentials['AccessKeyId']:
                    printInfo(
                        'Warning: Unable to determine STS token expiration; later API calls may fail.'
                    )
                    # Fall back to the previously saved STS credentials when available
                    if sts_credentials:
                        credentials = sts_credentials
                else:
                    if mfa_serial_arg:
                        credentials['SerialNumber'] = mfa_serial_arg
                    if mfa_code:
                        credentials['TokenCode'] = mfa_code
                    if 'AccessKeyId' in credentials and credentials[
                            'AccessKeyId']:
                        credentials = init_sts_session(profile_name,
                                                       credentials)
    # If we don't have valid creds by now, print an error message
    if 'AccessKeyId' not in credentials or credentials[
            'AccessKeyId'] is None or 'SecretAccessKey' not in credentials or credentials[
                'SecretAccessKey'] is None:
        printError(
            'Error: could not find AWS credentials. Use the --help option for more information.'
        )
    if 'AccessKeyId' not in credentials:
        credentials = {'AccessKeyId': None}
    return credentials
Exemplo n.º 45
0
    def _get_targets(self, response_attribute, api_client, method, list_params, ignore_list_error):
        """
        Fetch the targets, required as each provider may have particularities

        :param response_attribute:  Unused here; kept for interface compatibility with other providers
        :param api_client:          GCP API client used to issue the list call
        :param method:              Bound client method that lists the targets
        :param list_params:         Dict of parameters for the list call; values may contain
                                    {{project_placeholder}}, {{region_placeholder}} or
                                    {{zone_placeholder}} templates
        :param ignore_list_error:   Unused here; kept for interface compatibility

        :return:                    List of fetched target objects (empty on error)
        """

        targets = []

        try:

            regions = self.get_regions()
            zones = self.get_zones()

            # Create a list with all combinations for method parameters
            list_params_list = []

            # Dict for all the elements to combine
            combination_elements = {'project_placeholder': [project['projectId'] for project in self.projects],
                                    'region_placeholder': regions,
                                    'zone_placeholder': zones}

            # Get a list of {{}} terms
            sources = re.findall("{{(.*?)}}", str(list_params.values()))
            # Remove keys from combinations if they aren't in the sources
            confirmed_combination_elements = {}
            for source in sources:
                confirmed_combination_elements[source] = combination_elements[source]
            # Build a list of the possible combinations
            combinations = self._dict_product(confirmed_combination_elements)
            for combination in combinations:
                l = list_params.copy()
                # Substitute each {{placeholder}} in the parameter values with
                # the concrete value from this combination
                for k, v in l.items():
                    k1 = re.findall("{{(.*?)}}", v)
                    if k1:
                        l[k] = l[k].replace('{{%s}}' % k1[0], combination[k1[0]])
                list_params_list.append(l)

            for list_params_combination in list_params_list:

                try:

                    if self.library_type == 'cloud_client_library':

                        # TODO this should be more modular
                        # this is only for stackdriverlogging
                        if self.service == 'stackdriverlogging':
                            api_client.project = list_params_combination.pop('project')

                        response = method(**list_params_combination)

                        # TODO this should be more modular
                        # this is only for kubernetesengine
                        if isinstance(response, container_v1.types.ListClustersResponse):
                            targets += response.clusters
                        else:
                            targets += list(response)

                        # Remove client as it's unpickleable and adding the object to the Queue will pickle
                        # The client is later re-inserted in each Config
                        for t in targets:
                            if hasattr(t, '_client'):
                                t._client = None

                    if self.library_type == 'api_client_library':

                        # TODO need to handle long responses
                        request = method(**list_params_combination)
                        while request is not None:
                            response = request.execute()

                            if 'items' in response:
                                targets += response['items']
                            # TODO this should be more modular
                            # this is only for cloudresourcemanager
                            if 'bindings' in response:
                                targets += response['bindings']
                            # TODO this should be more modular
                            # this is only for IAM
                            if 'accounts' in response:
                                targets += response['accounts']

                            # TODO need to define the _next to handle long responses
                            # request = method_next(previous_request=request,
                            #                       previous_response=response)
                            request = None

                except HttpError as e:
                    # Report each distinct API error message only once per run
                    error_json = json.loads(e.content)
                    if error_json['error']['message'] not in self.error_list:
                        self.error_list.append(error_json['error']['message'])
                        printError(error_json['error']['message'])

                except PermissionDenied as e:
                    # NOTE(review): relies on e.message existing on the exception
                    # object - verify against the installed google-api-core version
                    printError("%s: %s - %s" % (e.message, self.service, self.targets))

                except Exception as e:
                    printException(e)

        except HttpError as e:
            # Same de-duplicated error reporting for failures outside the
            # per-combination loop (e.g. while building the combinations)
            error_json = json.loads(e.content)
            if error_json['error']['message'] not in self.error_list:
                self.error_list.append(error_json['error']['message'])
                printError(error_json['error']['message'])

        except Exception as e:
            printException(e)

        finally:
            # NOTE(review): a return inside 'finally' would also swallow any
            # uncaught exception; harmless here since everything is handled
            # above, but worth keeping in mind when editing
            return targets
Exemplo n.º 46
0
def match_security_groups_and_resources_callback(aws_config, current_config, path, current_path, resource_id, callback_args):
    """
    Browsing callback: link a resource to the security groups it uses by
    recording the resource under each group's 'used_by' entry in aws_config.

    :param aws_config:      Full AWS configuration dictionary (mutated in place)
    :param current_config:  Configuration object currently being browsed (unused here)
    :param path:            Unused here; kept for callback interface compatibility
    :param current_path:    Path components leading to the resource type within aws_config
    :param resource_id:     ID of the resource being processed
    :param callback_args:   Dict with 'sg_list_attribute_name' and 'sg_id_attribute_name',
                            plus optional 'resource_id_path' and 'status_path'
    """
    service = current_path[1]
    original_resource_path = combine_paths(copy.deepcopy(current_path), [ resource_id ])
    resource = get_object_at(aws_config, original_resource_path)
    if not 'resource_id_path' in callback_args:
        # The resource itself is the tracked object
        resource_type = current_path[-1]
        resource_path = copy.deepcopy(current_path)
        resource_path.append(resource_id)
    else:
        # Track a nested object instead (e.g. an instance within a reservation)
        resource_path = combine_paths(copy.deepcopy(current_path), callback_args['resource_id_path'])
        resource_id = resource_path[-1]
        resource_type = resource_path[-2]
    if 'status_path' in callback_args:
        # Group usage by resource status (dots replaced for valid dict keys)
        status_path = combine_paths(copy.deepcopy(original_resource_path), callback_args['status_path'])
        resource_status = get_object_at(aws_config, status_path).replace('.', '_')
    else:
        resource_status = None
    # Resources not nested under 'vpcs' need their VPC looked up via the
    # module-level sg_map (security group ID -> VPC)
    unknown_vpc_id = True if current_path[4] != 'vpcs' else False
    # Issue 89 & 91 : can instances have no security group?
    try:
        try:
            sg_attribute = get_object_at(resource, callback_args['sg_list_attribute_name'])
        except:
            # Resource has no security group attribute: nothing to link
            return
        # Normalize to a list so single-group resources are handled uniformly
        if type(sg_attribute) != list:
            sg_attribute = [ sg_attribute ]
        for resource_sg in sg_attribute:
            # The group may be a dict (keyed by sg_id_attribute_name) or a plain ID
            if type(resource_sg) == dict:
                sg_id = resource_sg[callback_args['sg_id_attribute_name']]
            else:
                sg_id = resource_sg
            if unknown_vpc_id:
                # Build the group's path from the VPC recorded in sg_map
                vpc_id = sg_map[sg_id]['vpc_id']
                sg_base_path = copy.deepcopy(current_path[0:4])
                sg_base_path[1] = 'ec2'
                sg_base_path = sg_base_path + [ 'vpcs', vpc_id, 'security_groups' ]
            else:
                # The current path already includes the VPC; security groups
                # always live under the ec2 service
                sg_base_path = copy.deepcopy(current_path[0:6])
                sg_base_path[1] = 'ec2'
                sg_base_path.append('security_groups')
            sg_path = copy.deepcopy(sg_base_path)
            sg_path.append(sg_id)
            sg = get_object_at(aws_config, sg_path)
            # Add usage information
            manage_dictionary(sg, 'used_by', {})
            manage_dictionary(sg['used_by'], service, {})
            manage_dictionary(sg['used_by'][service], 'resource_type', {})
            manage_dictionary(sg['used_by'][service]['resource_type'], resource_type, {} if resource_status else [])
            if resource_status:
                manage_dictionary(sg['used_by'][service]['resource_type'][resource_type], resource_status, [])
                if not resource_id in sg['used_by'][service]['resource_type'][resource_type][resource_status]:
                    sg['used_by'][service]['resource_type'][resource_type][resource_status].append(resource_id)
            else:
                sg['used_by'][service]['resource_type'][resource_type].append(resource_id)
    except Exception as e:
        region = current_path[3]
        vpc_id = current_path[5]
        # EC2-Classic ELBs have no VPC-scoped security groups: expected, ignore
        if vpc_id == ec2_classic and resource_type == 'elbs':
            pass
        else:
            printError('Failed to parse %s in %s in %s' % (resource_type, vpc_id, region))
            printException(e)
Exemplo n.º 47
0
def main():
    """
    Download CloudTrail log files from S3 for a given date range across
    regions, then decompress them locally.

    :return:    42 on error (requirements, bad dates, missing credentials),
                None otherwise
    """

    # Parse arguments
    parser = OpinelArgumentParser()
    parser.add_argument('debug')
    parser.add_argument('profile')
    parser.add_argument('regions')
    parser.add_argument('partition-name')
    parser.add_argument('bucket-name')
    parser.parser.add_argument('--aws-account-id',
                                dest='aws_account_id',
                                default=[ None ],
                                nargs='+',
                                help='Bleh.')
    parser.parser.add_argument('--from',
                                dest='from_date',
                                default=[ None ],
                                nargs='+',
                                help='Bleh.')
    parser.parser.add_argument('--to',
                                dest='to_date',
                                default=[ None ],
                                nargs='+',
                                help='Bleh.')

    args = parser.parse_args()

    # Configure the debug level
    configPrintException(args.debug)

    # Check version of opinel
    if not check_requirements(os.path.realpath(__file__)):
        return 42

    # Arguments
    profile_name = args.profile[0]
    try:
        from_date = datetime.datetime.strptime(args.from_date[0], "%Y/%m/%d").date()
        to_date = datetime.datetime.strptime(args.to_date[0], "%Y/%m/%d").date()
        delta = to_date - from_date
    except Exception as e:
        printException(e)
        printError('Error: dates must be formatted of the following format YYYY/MM/DD')
        return 42
    if delta.days < 0:
        printError('Error: your \'to\' date is earlier than your \'from\' date')
        return 42

    # Search for AWS credentials
    credentials = read_creds(profile_name)
    if not credentials['AccessKeyId']:
        return 42

    # Fetch AWS account ID
    if not args.aws_account_id[0]:
        printInfo('Fetching the AWS account ID...')
        aws_account_id = get_aws_account_id(credentials)
    else:
        aws_account_id = args.aws_account_id[0]
    global cloudtrail_log_path
    cloudtrail_log_path = cloudtrail_log_path.replace('AWS_ACCOUNT_ID', aws_account_id)

    # Create download dir
    if not os.path.exists(download_folder):
        os.makedirs(download_folder)

    # Iterate through regions
    s3_clients = {}
    for region in build_region_list('cloudtrail', args.regions, args.partition_name):

        # Connect to CloudTrail
        cloudtrail_client = connect_service('cloudtrail', credentials, region)
        if not cloudtrail_client:
            continue

        # Get information about the S3 bucket that receives CloudTrail logs.
        # Reset per region so a region without trails cannot reuse the
        # previous region's bucket (and a trail-less first region no longer
        # raises a NameError on an unbound bucket_name).
        bucket_name = None
        prefix = ''
        trails = cloudtrail_client.describe_trails()
        # NOTE: when several trails exist, only the last one listed is used
        for trail in trails['trailList']:
            bucket_name = trail['S3BucketName']
            prefix = trail['S3KeyPrefix'] if 'S3KeyPrefix' in trail else ''
        if not bucket_name:
            printError('No CloudTrail trail found in %s' % region)
            continue

        # Connect to S3 in the bucket's own region (may differ from the trail's)
        manage_dictionary(s3_clients, region, connect_service('s3', credentials, region))
        target_bucket_region = get_s3_bucket_location(s3_clients[region], bucket_name)
        manage_dictionary(s3_clients, target_bucket_region, connect_service('s3', credentials, target_bucket_region))
        s3_client = s3_clients[target_bucket_region]

        # Generate base path for files
        log_path = os.path.join(prefix, cloudtrail_log_path.replace('REGION', region))

        # Download files
        printInfo('Downloading log files in %s... ' % region, False)
        keys = []
        for i in range(delta.days + 1):
            day = from_date + timedelta(days=i)
            folder_path = os.path.join(log_path, day.strftime("%Y/%m/%d"))
            try:
                objects = handle_truncated_response(s3_client.list_objects, {'Bucket': bucket_name, 'Prefix': folder_path}, ['Contents'])
                for o in objects['Contents']:
                    keys.append([o['Key'], 0])
            except Exception as e:
                # A day without logs is not fatal; report and keep going
                printException(e)
        thread_work(keys, download_object, params = {'Bucket': bucket_name, 'S3Client': s3_client}, num_threads = 100)
        printInfo('Done')

    # Iterate through files and gunzip 'em
    printInfo('Decompressing files...')
    gzlogs = []
    for root, dirnames, filenames in os.walk(download_folder):
        for filename in filenames:
            gzlogs.append(filename)
    thread_work(gzlogs, gunzip_file, num_threads = 30)
Exemplo n.º 48
0
def display_qr_code(png, seed):
    """
    Display MFA QR code
    :param png:     QR code image contents, written to a temporary .png file
                    (NOTE(review): the file is opened in text mode 'wt', so
                    this assumes png is a str - binary bytes would raise a
                    TypeError here; verify against the caller)
    :param seed:    Base64-encoded MFA seed, shown as a manual fallback
    :return:        The open NamedTemporaryFile holding the QR code
    """
    # This NamedTemporaryFile is deleted as soon as it is closed, so
    # return it to caller, who must close it (or program termination
    # could cause it to be cleaned up, that's fine too).
    # If we don't keep the file around until after the user has synced
    # his MFA, the file will possibly be already deleted by the time
    # the operating system gets around to execing the browser, if
    # we're using a browser.
    qrcode_file = tempfile.NamedTemporaryFile(mode='wt',
                                              suffix='.png',
                                              delete=True)
    qrcode_file.write(png)
    qrcode_file.flush()

    if _fabulous_available:
        # Best case: render the QR code inline in the terminal
        fabulous.utils.term.bgcolor = 'white'
        with open(qrcode_file.name, 'rb') as png_file:
            print(fabulous.image.Image(png_file, 100))
        return qrcode_file

    # No inline rendering available: collect the browser classes that can
    # display an image, then check whether the default browser is one of them
    candidate_browsers = [
        webbrowser.BackgroundBrowser, webbrowser.Mozilla,
        webbrowser.Galeon, webbrowser.Chrome, webbrowser.Opera,
        webbrowser.Konqueror
    ]
    platform = sys.platform
    if platform[:3] == 'win':
        candidate_browsers.append(webbrowser.WindowsDefault)
    elif platform == 'darwin':
        candidate_browsers.append(webbrowser.MacOSXOSAScript)

    default_browser_type = None
    try:
        default_browser_type = type(webbrowser.get())
    except webbrowser.Error:
        pass

    if default_browser_type in candidate_browsers:
        printError(
            "Unable to print qr code directly to your terminal, trying a web browser."
        )
        webbrowser.open('file://' + qrcode_file.name)
    else:
        # Last resort: point the user at the temp file and the raw seed
        printInfo(
            "Unable to print qr code directly to your terminal, and no graphical web browser seems available."
        )
        printInfo(
            "But, the qr code file is temporarily available as this file:")
        printInfo("\n    %s\n" % qrcode_file.name)
        printInfo(
            "Alternately, if you feel like typing the seed manually into your MFA app:"
        )
        # this is a base32-encoded binary string (for case
        # insensitivity) which is then dutifully base64-encoded by
        # amazon before putting it on the wire.  so the actual
        # secret is b32decode(b64decode(seed)), and what users
        # will need to type in to their app is just
        # b64decode(seed).  print that out so users can (if
        # desperate) type in their MFA app.
        printInfo("\n    %s\n" % base64.b64decode(seed))
    return qrcode_file
Exemplo n.º 49
0
    def authenticate(self, user_account=None, service_account=None, **kargs):
        """
        Implement authentication for the GCP provider
        Refer to https://google-auth.readthedocs.io/en/stable/reference/google.auth.html.

        :param user_account:     Truthy when authenticating with a user account
        :param service_account:  Path to a service account key file, if any
        :return:                 True on success, False otherwise
        """
        # Validate the requested account type before touching google.auth
        if user_account:
            # disable GCP warning about using User Accounts
            warnings.filterwarnings(
                "ignore",
                "Your application has authenticated using end user credentials"
            )
        elif service_account:
            # Point the Google SDK at the provided key file
            os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = os.path.abspath(
                service_account)
        else:
            printError(
                'Failed to authenticate to GCP - no supported account type')
            return False

        try:
            self.credentials, default_project_id = google.auth.default()
            if not self.credentials:
                return False

            # Determine the set of projects to scan, by decreasing precedence
            if self.all_projects:
                # All projects to which the user / Service Account has access to
                self.projects = self._get_projects(parent_type='all',
                                                   parent_id=None)
                if service_account and hasattr(self.credentials,
                                               'service_account_email'):
                    self.aws_account_id = self.credentials.service_account_email  # FIXME this is for AWS
                else:
                    self.aws_account_id = 'GCP'  # FIXME this is for AWS
                self.profile = 'GCP'  # FIXME this is for AWS
            elif self.project_id:
                # Project passed through the CLI
                self.projects = self._get_projects(parent_type='project',
                                                   parent_id=self.project_id)
                self.aws_account_id = self.project_id  # FIXME this is for AWS
                self.profile = self.project_id  # FIXME this is for AWS
            elif self.folder_id:
                # Folder passed through the CLI
                self.projects = self._get_projects(parent_type='folder',
                                                   parent_id=self.folder_id)
                self.aws_account_id = self.folder_id  # FIXME this is for AWS
                self.profile = self.folder_id  # FIXME this is for AWS
            elif self.organization_id:
                # Organization passed through the CLI
                self.projects = self._get_projects(
                    parent_type='organization',
                    parent_id=self.organization_id)
                self.aws_account_id = self.organization_id  # FIXME this is for AWS
                self.profile = self.organization_id  # FIXME this is for AWS
            elif default_project_id:
                # Project inferred from default configuration
                self.projects = self._get_projects(
                    parent_type='project', parent_id=default_project_id)
                self.aws_account_id = default_project_id  # FIXME this is for AWS
                self.profile = default_project_id  # FIXME this is for AWS
            else:
                printInfo(
                    "Could not infer the Projects to scan and no default Project ID was found."
                )
                return False

            # TODO this shouldn't be done here? but it has to in order to init with projects...
            self.services.set_projects(projects=self.projects)
            return True

        except google.auth.exceptions.DefaultCredentialsError as e:
            printError('Failed to authenticate to GCP')
            printException(e)
            return False

        except googleapiclient.errors.HttpError as e:
            printError('Failed to authenticate to GCP')
            printException(e)
            return False