def get_aws_info():
    """Collect AWS identity info for this machine.

    :return: dict with the machine's 'instance_id' when running on an AWS
        instance, otherwise an empty dict.
    """
    LOG.info("Collecting AWS info")
    aws = AwsInstance()
    if not aws.is_aws_instance():
        LOG.info("Machine is NOT an AWS instance")
        return {}
    LOG.info("Machine is an AWS instance")
    return {'instance_id': aws.get_instance_id()}
def try_init_aws_instance():
    """Best-effort initialization of the shared AwsInstance handle.

    Failures are logged with a traceback and swallowed — not being able to
    build an AwsInstance is an expected condition off-AWS.
    """
    # noinspection PyBroadException
    try:
        instance = AwsInstance()
    except Exception:
        logger.error("Failed init aws instance. Exception info: ", exc_info=True)
    else:
        # Publish the handle only after construction fully succeeded.
        RemoteRunAwsService.aws_instance = instance
def init():
    """
    Initializes service. Subsequent calls to this function have no effect.
    Must be called at least once (in entire monkey lifetime) before usage of functions
    :return: None
    """
    # Idempotent: once the shared instance exists, do nothing.
    if RemoteRunAwsService.aws_instance is not None:
        return
    RemoteRunAwsService.aws_instance = AwsInstance()
class AwsEnvironment(Environment):
    """Environment flavor for an island running on an AWS instance.

    Reads the instance id and region from AWS instance metadata; the
    instance id seeds the default user's password hash.
    """

    def __init__(self):
        super(AwsEnvironment, self).__init__()
        # NOTE(review): AwsInstance() errors are not caught here — presumably
        # being on AWS is mandatory for this environment; confirm with callers.
        self.aws_info = AwsInstance()
        self._instance_id = self._get_instance_id()
        self.region = self._get_region()

    def _get_instance_id(self):
        # Thin delegation to the metadata handle.
        return self.aws_info.get_instance_id()

    def _get_region(self):
        # Thin delegation to the metadata handle.
        return self.aws_info.get_region()

    def get_auth_users(self):
        """Return the single default user, keyed to this instance's id."""
        hashed_password = self.hash_secret(self._instance_id)
        return [monkey_island.cc.auth.User(1, 'monkey', hashed_password)]
class AwsEnvironment(Environment):
    """AWS-backed Environment: identity comes from EC2 instance metadata."""

    def __init__(self):
        super(AwsEnvironment, self).__init__()
        # Not suppressing error here on purpose: if we are meant to run on
        # AWS, failing to read instance metadata is critical.
        self.aws_info = AwsInstance()
        self._instance_id = self._get_instance_id()
        self.region = self._get_region()

    def _get_instance_id(self):
        """Instance id as reported by the AwsInstance metadata handle."""
        return self.aws_info.get_instance_id()

    def _get_region(self):
        """Region as reported by the AwsInstance metadata handle."""
        return self.aws_info.get_region()

    def get_auth_users(self):
        """Default user whose secret derives from the instance id."""
        secret = self.hash_secret(self._instance_id)
        return [monkey_island.cc.auth.User(1, 'monkey', secret)]
def handle_report(report_json):
    """Export the monkey report's issues to AWS as Security Hub findings.

    :param report_json: full report dict; issues are read from
        report_json['recommendations']['issues'].
    :return: True when there was nothing to send or the export succeeded,
        False when sending the findings failed.
    """
    issues_list = report_json['recommendations']['issues']
    if not issues_list:
        logger.info('No issues were found by the monkey, no need to send anything')
        return True

    # Build the (potentially metadata-fetching) AwsInstance only once we know
    # there is something to export, and query the region a single time instead
    # of once per send attempt.
    aws = AwsInstance()
    region = aws.get_region()

    # Only issues tied to an AWS instance are exportable as findings.
    findings_list = [
        AWSExporter._prepare_finding(issue, region)
        for machine in issues_list
        for issue in issues_list[machine]
        if issue.get('aws_instance_id')
    ]

    if not AWSExporter._send_findings(findings_list, AWSExporter._get_aws_keys(), region):
        logger.error('Exporting findings to aws failed')
        return False
    return True
def get_instances():
    """
    Get the information for all instances with the relevant roles.

    This function will assume that it's running on an EC2 instance with the correct IAM role.
    See https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html#iam-role for details.

    :raises: botocore.exceptions.ClientError if can't describe local instance information.
    :return: All visible instances from this instance
    """
    current_instance = AwsInstance()
    local_ssm_client = boto3.client("ssm", current_instance.get_region())
    try:
        response = local_ssm_client.describe_instance_information()
        return filter_instance_data_from_aws_response(response)
    except botocore.exceptions.ClientError as e:
        # BaseException.message was removed in Python 3 — `e.message` raised
        # AttributeError here, masking the original error; str(e) is portable.
        logger.warning("AWS client error while trying to get instances: " + str(e))
        # Bare raise preserves the original traceback (raise e would truncate it).
        raise
def _prepare_finding(issue, region):
    """Build an AWS Security Hub finding dict for a single report issue.

    Dispatches on issue['type'] to a per-issue handler that fills in the
    type-specific fields, then merges those into the common finding skeleton.

    :param issue: issue dict from the monkey report; must contain 'type'.
    :param region: AWS region string used to build the product/instance ARNs.
    :raises KeyError: if issue['type'] has no registered handler.
    :return: complete finding dict ready for Security Hub import.
    """
    # Dispatch table: issue type -> handler producing type-specific fields.
    findings_dict = {
        'island_cross_segment': AWSExporter._handle_island_cross_segment_issue,
        'ssh': AWSExporter._handle_ssh_issue,
        'shellshock': AWSExporter._handle_shellshock_issue,
        'tunnel': AWSExporter._handle_tunnel_issue,
        'elastic': AWSExporter._handle_elastic_issue,
        'smb_password': AWSExporter._handle_smb_password_issue,
        'smb_pth': AWSExporter._handle_smb_pth_issue,
        'sambacry': AWSExporter._handle_sambacry_issue,
        'shared_passwords': AWSExporter._handle_shared_passwords_issue,
        'wmi_password': AWSExporter._handle_wmi_password_issue,
        'wmi_pth': AWSExporter._handle_wmi_pth_issue,
        'ssh_key': AWSExporter._handle_ssh_key_issue,
        'shared_passwords_domain': AWSExporter._handle_shared_passwords_domain_issue,
        'shared_admins_domain': AWSExporter._handle_shared_admins_domain_issue,
        'strong_users_on_crit': AWSExporter._handle_strong_users_on_crit_issue,
        'struts2': AWSExporter._handle_struts2_issue,
        'weblogic': AWSExporter._handle_weblogic_issue,
        'hadoop': AWSExporter._handle_hadoop_issue,
        # azure and conficker are not relevant issues for an AWS env
    }

    configured_product_arn = load_server_configuration_from_file()['aws'].get('sec_hub_product_arn', '')
    product_arn = 'arn:aws:securityhub:{region}:{arn}'.format(region=region, arn=configured_product_arn)
    # Template ARN — the handler substitutes the concrete instance id.
    instance_arn = 'arn:aws:ec2:' + str(region) + ':instance:{instance_id}'
    account_id = AwsInstance().get_account_id()
    logger.debug("aws account id acquired: {}".format(account_id))

    # Single UTC timestamp: the 'Z' suffix declares UTC, so local-time
    # datetime.now() was mislabeled; and CreatedAt/UpdatedAt should be
    # identical for a freshly created finding (two now() calls drifted by
    # microseconds).
    timestamp = datetime.utcnow().isoformat() + 'Z'

    finding = {
        "SchemaVersion": "2018-10-08",
        "Id": uuid.uuid4().hex,
        "ProductArn": product_arn,
        "GeneratorId": issue['type'],
        "AwsAccountId": account_id,
        "RecordState": "ACTIVE",
        "Types": ["Software and Configuration Checks/Vulnerabilities/CVE"],
        "CreatedAt": timestamp,
        "UpdatedAt": timestamp,
    }
    return AWSExporter.merge_two_dicts(finding, findings_dict[issue['type']](issue, instance_arn))
def __init__(self):
    """Initialize the AWS environment from instance metadata.

    Order matters: the AwsInstance handle must exist before the helper
    getters read the instance id and region from it.
    """
    super(AwsEnvironment, self).__init__()
    # NOTE(review): presumably queries EC2 instance metadata; any failure
    # propagates to the caller — confirm that is intended here.
    self.aws_info = AwsInstance()
    self._instance_id = self._get_instance_id()
    self.region = self._get_region()
def __init__(self): super(AwsEnvironment, self).__init__() # Not suppressing error here on purpose. This is critical if we're on AWS env. self.aws_info = AwsInstance() self._instance_id = self._get_instance_id() self.region = self._get_region()