def test_config_version_socrata():
    c = Config(f"{Path(__file__).parent}/data/socrata.yml")
    uid = c.parsed_unrendered_template["dataset"]["source"]["socrata"]["uid"]
    version = c.version_socrata(uid)
    assert len(version) == 8  # format: YYYYMMDD
    assert int(version[-2:]) <= 31  # check date
    assert int(version[-4:-2]) <= 12  # check month
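The two range assertions above would accept impossible dates such as 20240231; a stricter check (a sketch, not part of the original test) can lean on datetime parsing:

from datetime import datetime

def is_yyyymmdd(version: str) -> bool:
    # Accept only strings that parse as a real calendar date in YYYYMMDD form
    try:
        datetime.strptime(version, "%Y%m%d")
        return True
    except ValueError:
        return False

assert is_yyyymmdd("20240131")
assert not is_yyyymmdd("20240231")  # February 31st is rejected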
Example #2
def collect_results(request_info, main_account):
    security_features = request_info['request_params']['security_features']
    regions = request_info['request_params']['regions']
    scan_account_id = request_info['request_params']['account_id']
    tags = request_info['request_params']['tags']
    response = {'global': {}}
    for region in regions:
        response[region] = {}
        for sec_feature in security_features:
            if sec_feature not in GLOBAL_SECURITY_FEATURES:
                response[region][sec_feature] = []
            else:
                response['global'][sec_feature] = []

    config = Config()
    for security_feature in security_features:
        sec_feature_config = config.get_module_config_by_name(security_feature)
        ddb_table = main_account.resource("dynamodb").Table(
            sec_feature_config.ddb_table_name)
        for issue in IssueOperations.get_account_open_issues(
                ddb_table, scan_account_id):
            if issue.contains_tags(tags) and (
                    issue.issue_details.region in regions
                    or security_feature in GLOBAL_SECURITY_FEATURES):
                issue_region = issue.issue_details.region if issue.issue_details.region else 'global'
                response[issue_region][security_feature].append({
                    'id': issue.issue_id,
                    'issue_details': issue.issue_details.as_dict()
                })
    return response
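For reference, collect_results builds a region-keyed mapping with a dedicated 'global' bucket for features listed in GLOBAL_SECURITY_FEATURES. An illustrative output shape (the feature names and ids below are hypothetical):

example_response = {
    "global": {
        "iam_inactive_keys": []  # a feature assumed to be in GLOBAL_SECURITY_FEATURES
    },
    "us-east-1": {
        "sg_unrestricted_access": [
            {"id": "sg-0123", "issue_details": {"region": "us-east-1"}}
        ]
    },
}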
Example #3
def lambda_handler(event, context):
    """ Lambda handler to initiate to find public RDS snapshots """
    set_logging(level=logging.INFO)
    logging.debug("Initiating public RDS snapshots checking")

    try:
        sns_arn = os.environ["SNS_ARN"]
        config = Config()

        if not config.rdsSnapshot.enabled:
            logging.debug("Public RDS snapshots checking disabled")
            return

        logging.debug("Iterating each account to initiate RDS snapshots checking")
        for account_id, account_name in config.rdsSnapshot.accounts.items():
            payload = {"account_id": account_id,
                       "account_name": account_name,
                       "regions": config.aws.regions,
                       "sns_arn": sns_arn
                      }
            logging.debug(f"Initiating public RDS snapshots checking for '{account_name}'")
            Sns.publish(sns_arn, payload)
    except Exception:
        logging.exception("Error occurred while initiation of public RDS snapshots checking")
        return

    logging.debug("Public RDS snapshot checking initiation done")
Example #4
def lambda_handler(event, context):
    """ Lambda handler to initiate to find SQS public access in policy """
    set_logging(level=logging.INFO)
    logging.debug("Initiating SQS policies checking")

    try:
        sns_arn = os.environ["SNS_ARN"]
        config = Config()

        if not config.sqspolicy.enabled:
            logging.debug("SQS policies checking disabled")
            return

        logging.debug(
            "Iterating over each account to initiate SQS policies check")
        for account_id, account_name in config.sqspolicy.accounts.items():
            payload = {
                "account_id": account_id,
                "account_name": account_name,
                "regions": config.aws.regions,
                "sns_arn": sns_arn
            }
            logging.debug(
                f"Initiating SQS policies checking for '{account_name}'")
            Sns.publish(sns_arn, payload)

    except Exception:
        logging.exception(
            "Error occurred while initiating SQS policy checks")
        return

    logging.debug("SQS policies checking initiation done")
Example #5
def lambda_handler(event, context):
    """ Lambda handler to initiate to find security groups unrestricted access """
    set_logging(level=logging.INFO)
    logging.debug("Initiating CloudTrail checking")

    try:
        sns_arn = os.environ["SNS_ARN"]
        config = Config()

        if not config.cloudtrails.enabled:
            logging.debug("CloudTrail checking disabled")
            return

        logging.debug(
            "Iterating over each account to initiate CloudTrail check")
        for account_id, account_name in config.cloudtrails.accounts.items():
            payload = {
                "account_id": account_id,
                "account_name": account_name,
                "regions": config.aws.regions,
                "sns_arn": sns_arn
            }
            logging.debug(
                f"Initiating CloudTrail checking for '{account_name}'")
            Sns.publish(sns_arn, payload)
    except Exception:
        logging.exception(
            "Error occurred while initiating CloudTrail checks")
        return

    logging.debug("CloudTrail checking initiation done")
Example #6
def lambda_handler(event, context):
    """ Lambda handler to initiate to find inactive keys for IAM users """
    set_logging(level=logging.INFO)
    logging.debug("Initiating IAM user inactive keys checking")

    try:
        sns_arn = os.environ["SNS_IAM_USER_INACTIVE_KEYS_ARN"]
        config = Config()

        if not config.iamUserInactiveKeys.enabled:
            logging.debug("IAM user inactive keys checking disabled")
            return

        logging.debug(
            "Iterating over each account to initiate IAM user inactive keys check"
        )
        for account_id, account_name in config.iamUserInactiveKeys.accounts.items():
            payload = {
                "account_id": account_id,
                "account_name": account_name,
            }
            logging.debug(
                f"Initiating IAM user inactive keys checking for '{account_name}'"
            )
            Sns.publish(sns_arn, payload)
    except Exception:
        logging.exception(
            "Error occurred while initiating the IAM user inactive keys check")
        return

    logging.debug("IAM user inactive keys checking initiation done")
Example #7
    def get_error_msg(self, msg='E9999'):
        """
            gets error message
        """
        config = Config()
        # READ THE YAML ERROR CODE FILES AND PARSE THEM
        error_dir = config['error_code_dir']
        exclusion = config['error_code_exclusion']
        file_list = []
        import os
        for filename in os.listdir(error_dir):
            if filename.endswith('yaml') and filename not in exclusion:
                file_list.append(os.path.join(error_dir, filename))
        # Read in all the files and merge their error definitions
        error_config = {}
        for filepath in file_list:
            # file_list entries are already full paths, so open them directly
            with open(filepath, 'rb') as f:
                error_config.update(yaml.safe_load(f)['errors'])

        # FIND THE ERROR CODE BY BREAKING DOWN THE msg
        if len(msg.split()) == 1:
            # THIS COULD BE A SPECIAL ERROR MESSAGE
            for key in error_config.keys():
                if key in msg.lower():
                    error_val = int(msg.split('_')[1])
                    return error_config[key][error_val]
        return msg
Example #8
def lambda_handler(event, context):
    """ Lambda handler to initiate to find unencrypted EBS volumes """
    set_logging(level=logging.INFO)
    logging.debug("Initiating unencrypted EBS volumes checking")

    try:
        sns_arn = os.environ["SNS_EBS_VOLUMES_ARN"]
        config = Config()

        if not config.ebsVolume.enabled:
            logging.debug("Unencrypted EBS volumes checking disabled")
            return

        logging.debug("Iterating over each account to initiate unencrypted EBS volumes checking")
        for account_id, account_name in config.ebsVolume.accounts.items():
            payload = {"account_id": account_id,
                       "account_name": account_name,
                       "regions": config.aws.regions,
                       "sns_arn": sns_arn
                      }
            logging.debug(f"Initiating unencrypted EBS volume checking for '{account_name}'")
            Sns.publish(sns_arn, payload)
    except Exception:
        logging.exception("Error occurred while initiation of unencrypted EBS volumes checking")
        return

    logging.debug("Unencrypted EBS volume checking initiation done")
Example #9
def lambda_handler(event, context):
    set_logging(level=logging.DEBUG)

    config = Config()

    #logging.debug("Client token: " + event['authorizationToken'])
    logging.debug("Method ARN: " + event['methodArn'])

    if event['authorizationToken'] != config.api.token:
        raise Exception('Unauthorized')

    principalId = 'hammer-api-user'

    tmp = event['methodArn'].split(':')
    apiGatewayArnTmp = tmp[5].split('/')
    awsAccountId = tmp[4]

    policy = AuthPolicy(principalId, awsAccountId)
    policy.restApiId = apiGatewayArnTmp[0]
    policy.region = tmp[3]
    policy.stage = apiGatewayArnTmp[1]
    # a quick hack to allow GET calls to /identify/{request_id}, where request_id is a hex string
    # TODO: rewrite this as a more generic solution
    if len(apiGatewayArnTmp) == 5:
        full_path = '/identify/' + apiGatewayArnTmp[4]
        policy.allowMethod(HttpVerb.GET, full_path)
    policy.allowMethod(HttpVerb.POST, '/identify')
    policy.allowMethod(HttpVerb.POST, '/remediate')

    authResponse = policy.build()

    logging.debug(jsonDumps(authResponse))

    return authResponse
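The ARN slicing above follows the API Gateway methodArn layout; a worked example with placeholder values (not from the source) makes the indices concrete:

# methodArn: arn:aws:execute-api:<region>:<account>:<restApiId>/<stage>/<verb>/<path...>
arn = "arn:aws:execute-api:us-east-1:123456789012:abc123/prod/POST/identify"
tmp = arn.split(':')
assert tmp[3] == "us-east-1"     # region
assert tmp[4] == "123456789012"  # awsAccountId
parts = tmp[5].split('/')
assert parts == ["abc123", "prod", "POST", "identify"]  # restApiId, stage, verb, path
# a GET to /identify/{request_id} yields len(parts) == 5,
# which is what the "quick hack" above keys on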
Example #10
def get_tc_list(module_name):
    """
        Returns a list of test cases names (strings) to be executed by calling find_tc
        @param module_name (string) - The module (dot py file)  to pull all the test_cases from
        @return tc_list (string List)
    """
    # LOADING THE TC CONFIG
    config = Config()
    tc_list = list()
    # REGEX TO FIND TEST CASES IN THE TEST CLASS
    comp = re.compile("(test_)([a-z_0-9]+?)(\d{4}$)")
    tmp_list = module_name.split('_')
    # CONVERT THE MODULE NAME TO CLASS NAME, MUST FOLLOW THIS FORMAT
    class_name = tmp_list[0].upper() + tmp_list[1][0].upper() + tmp_list[1][1:]
    # LOAD THE TEST CASE YAML
    with open(config['test_case_file'], 'rb') as f:
        test_case_config = yaml.safe_load(f)

    # FIND THE PARENT MODULE THAT HAS THE TEST SUITE MODULE
    for m_name in test_case_config:
        if module_name in test_case_config[m_name]:
            # THIS IS THE RIGHT MODULE THAT WE ARE LOOKING FOR SO IMPORT THE SUB MODULE
            idx = test_case_config[m_name].index(module_name)
            module = importlib.import_module(
                f"test_cases.{m_name}.{test_case_config[m_name][idx]}")
            test_class = getattr(module, class_name)
            for name in test_class.__dict__:
                if comp.match(name):
                    tc_list.append(name)
            #tc_list.append(tmp_data)
    return tc_list
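The regex above only admits method names ending in a four-digit id; a quick sanity check with made-up names:

import re

comp = re.compile(r"(test_)([a-z_0-9]+?)(\d{4}$)")
assert comp.match("test_login_0001")            # hypothetical name: matches
assert comp.match("test_login_helper") is None  # no trailing 4-digit id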
Example #11
def get_scan_results(request_id):
    config = Config()
    main_account = Account(region=config.aws.region)
    api_table = main_account.resource("dynamodb").Table(
        config.api.ddb_table_name)
    request_info = DDB.get_request_data(api_table, request_id)
    if not request_info:
        status_code = 404
        body = {"message": "Request id has not been found."}
    elif request_info['progress'] == request_info['total']:
        status_code = 200
        body = {
            "scan_status": "COMPLETE",
            "scan_results": collect_results(request_info, main_account)
        }
    elif time.time() - request_info['updated'] <= 300:
        status_code = 200
        body = {"scan_status": "IN_PROGRESS"}
    else:
        status_code = 200
        body = {"scan_status": "FAILED"}
    return {
        "statusCode": status_code,
        "body": json.dumps(body, indent=4, default=utility.jsonEncoder)
    }
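One way a client might consume this endpoint's contract, assuming direct access to get_scan_results (a sketch, not from the source): poll until the status leaves IN_PROGRESS.

import json
import time

def wait_for_scan(request_id, poll_seconds=10):
    # Poll get_scan_results until the scan completes, fails, or the id is unknown
    while True:
        result = get_scan_results(request_id)
        body = json.loads(result["body"])
        if result["statusCode"] != 200 or body.get("scan_status") != "IN_PROGRESS":
            return result
        time.sleep(poll_seconds)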
Example #12
def generate(
    model_dir: Path,
    model_iteration: Optional[int],
    model_config: Optional[Path],
    output_dir: Path,
    use_gpu: bool,
):
    if model_config is None:
        model_config = model_dir / "config.yaml"

    output_dir.mkdir(exist_ok=True)
    save_arguments(output_dir / "arguments.yaml", generate, locals())

    config = Config.from_dict(yaml.safe_load(model_config.open()))

    model_path = _get_predictor_model_path(
        model_dir=model_dir,
        iteration=model_iteration,
    )
    generator = Generator(
        config=config,
        predictor=model_path,
        use_gpu=use_gpu,
    )

    dataset = create_dataset(config.dataset)["test"]
    for data in tqdm(dataset, desc="generate"):
        target = data["target"]
        output = generator.generate(data["feature"])
Example #13
def lambda_handler(event, context):
    """ Lambda handler to initiate to find S3 bucket public access in ACL """
    set_logging(level=logging.INFO)
    logging.debug("Initiating S3 acls checking")

    try:
        sns_arn = os.environ["SNS_S3_ACL_ARN"]
        config = Config()

        if not config.s3acl.enabled:
            logging.debug("S3 acls checking disabled")
            return

        logging.debug("Iterating over each account to initiate s3 acls check")
        for account_id, account_name in config.s3acl.accounts.items():
            payload = {
                "account_id": account_id,
                "account_name": account_name,
            }
            logging.debug(f"Initiating s3 acls checking for '{account_name}'")
            Sns.publish(sns_arn, payload)
    except Exception:
        logging.exception("Error occurred while initiation of S3 acl checking")
        return

    logging.debug("S3 acls checking initiation done")
Example #14
def create_json_info():
    """

    @return:
    """
    config = Config()
    # Get the OS, dist (7, 2012, minty, ubuntu), and bit (32 or 64) of the running system
    (opsys, ip_addr, dist, bit, hostname) = get_os_info()
    # Assign the values to the TestRun so that they are available in the xml report
    config['os'] = opsys
    config['ip'] = ip_addr
    config['dist'] = dist
    config['bit'] = bit
    config['hostname'] = hostname
    print("Hostname is {}".format(hostname))

    output_file = config['json_file_path']
    test_run_dict = dict()
    test_run_dict['name'] = config['hostname']
    test_run_dict['run_id'] = config['run_id']
    test_run_dict['os'] = config['os']
    test_run_dict['ip'] = config['ip']
    test_run_dict['browser'] = config['browser']
    test_run_dict['dist'] = config['dist']
    test_run_dict['test_cases'] = []

    if not os.path.exists(config['output_dir']):
        os.mkdir(config['output_dir'])
    with open(output_file, 'w') as outfile:
        outfile.write(json.dumps(test_run_dict, sort_keys=True, indent=4))
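The file written above can be read back with plain json; the path below stands in for whatever config['json_file_path'] points at (hypothetical here):

import json

with open("output/test_run.json") as f:  # hypothetical json_file_path value
    run = json.load(f)
print(run["name"], run["os"], len(run["test_cases"]))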
Example #15
    def __init__(self):
        self.config = Config()
        self.enabled = self.config.aws.ddb_backup_enabled
        self.retention_period = self.config.aws.ddb_backup_retention
        self.account = Account(region=self.config.aws.region)
        self.ddb_client = self.account.client('dynamodb')
        self.ddb_resource = self.account.resource('dynamodb')
        self.now = datetime.now(timezone.utc)
        # used as a part of backup name
        self.today = self.now.strftime("%Y-%m-%d")
Example #16
def test_config_compute_parsed():
    dataset, source, destination, info = Config(
        f"{Path(__file__).parent}/data/socrata.yml").compute_parsed
    assert dataset["source"] == source
    assert dataset["info"] == info
    assert dataset["destination"] == destination
    assert "url" in source
    assert "options" in source
    assert "geometry" in source
    assert "fields" in destination
    assert "options" in destination
    assert "geometry" in destination
Example #17
def get_test_metadata(test_list):
    """

    :param test_list:
    :return:
    """
    config = Config()
    metadata_list = list()
    # GET THE TEST ID
    for test_name in test_list:
        # NORMALIZE THE NAME SO THE test_ PREFIX IS PRESENT EXACTLY ONCE
        test_name = test_name.replace("test_", "")
        full_test_name = 'test_' + test_name

        test_object = find_tc(full_test_name)
        assert test_object, "Failed to find any test case named '{}'".format(
            test_name)
        test_id_list = re.findall(r"test_([a-zA-Z_]+?\d{4})", full_test_name)
        assert len(test_id_list) == 1, "Test name does not follow pattern"
        # GET THE DOC STRING INTO A DICTIONARY
        doc_dict = test_object.doc_dict
        test_cases = {"": ""}
        try:
            note = eval(doc_dict['note']) if 'note' in doc_dict else None
            if isinstance(note, dict):
                test_cases = note
        except Exception:
            print("Note is not a dictionary")

        # MAKE A TESTCASE FOR EACH TEST IF A DICTIONARY IS ADDED
        for key, val in test_cases.items():
            test_metadata = test_object.doc_dict.copy()
            test_metadata['name'] = "{}{}".format(test_metadata['title'],
                                                  " %s" % key if key else "")
            test_metadata['script_id'] = "{}{}".format(
                test_name, " %s" % val if val else "")
            comp = re.compile("# (.+)\n")
            test_metadata['steps'] = [
                x.capitalize().replace('"', '') for x in (comp.findall(
                    inspect.getsource(getattr(test_object, full_test_name))))
            ]
            if 'priority' not in test_metadata:
                test_metadata['priority'] = 3
            if 'result' not in test_metadata:
                test_metadata['result'] = test_metadata['test'].replace(
                    '"', '')
            metadata_list.append(test_metadata)
    return metadata_list
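The steps field above is scraped from '# ' comments in the test source; a self-contained illustration of that extraction (the test body is made up):

import inspect
import re

def test_example_0001():
    # Open the login page
    # Submit valid credentials
    pass

steps = re.findall(r"# (.+)\n", inspect.getsource(test_example_0001))
assert steps == ["Open the login page", "Submit valid credentials"]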
Example #18
def property_notifications():
    if request.method == "GET":
        return jsonify({'status': 'success', 'message': 'notification user successfully received'}), 200
    else:
        notifications_data = request.get_json()
        print(notifications_data)
        if "email" in notifications_data:
            email = notifications_data['email']
        else:
            return jsonify({'status': 'failure', 'message': 'unable to located email'}), 500
        if "name" in notifications_data:
            name = notifications_data['name']
        else:
            return jsonify({'status': 'failure', 'message': 'unable to located name'}), 500
        if "uid" in notifications_data:
            uid = notifications_data['surname']
        else:
            return jsonify({'status': 'failure', 'message': 'unable to located uid'}), 500

        return AdminView(config=Config()).add_notifications_user(email=email, name=name, uid=uid)
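For clarity, the POST body this endpoint expects carries exactly three keys; the values below are placeholders:

payload = {"email": "user@example.com", "name": "Jane Doe", "uid": "u-1234"}  # hypothetical values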
Example #19
def find_tc(test_name):
    """
        Finds the test case test from the test case object by name
        @param: test_name(string) - the name of the test case, such as 'test_0001'
        @return: Unit Test test case object
        @note: will find the test case based on the name regardless of where in the test_cases package it's located
    """
    config = Config()
    with open(config['test_case_file'], 'rb') as f:
        tc_config = yaml.safe_load(f)

    for k, v in tc_config.items():
        for package in v:
            module = importlib.import_module(f'test_cases.{k}.{package}')
            for _, oj in inspect.getmembers(module):
                if inspect.isclass(oj):
                    if hasattr(oj, test_name):
                        return oj(test_name)
    return None
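Typical usage resolves a test case object by name; the name below is hypothetical:

tc = find_tc("test_login_0001")  # hypothetical test name
if tc is None:
    raise LookupError("no such test case")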
Example #20
    def js_snippet(self, snippet_title, format_text=None, *args):
        """
            Runs a javascript snippet
            @param snippet_title (string) - the name of the snippet file to execute
            @param format_text (string) - optional text substituted into the snippet via %-formatting
            @param args (Variable arguments) - the arguments to pass into the execute script
            @returns the return value of the javascript snippet
        """
        config = Config()
        logger.debug("Snippet: {} beginning...".format(snippet_title))
        # Read in the JSSnippet
        script = ""
        snippet_path = config['js_snippet_dir']
        with open(os.path.join(snippet_path, snippet_title), 'r') as f:
            script += f.read()
        if format_text is not None:
            script = script % format_text
        ret_val = self.execute_script(script, True, False, *args)
        logger.debug("Snippet: {} complete...".format(snippet_title))
        return ret_val
Example #21
    def __init__(self, name):
        """
            Test Template Initialization initializes a TestTemplate object with some pre-configured values
            @param name (string) - Name of the test to initialize
            @returns TestTemplate Object
            @note This sets up the verbs, assigns the config to it, parses the function's
            docstring and then initializes the action
        """
        config = Config()
        super(TestTemplate, self).__init__(name)
        self.doc_dict = None
        func = getattr(self, name)
        self.get_test_doc(name, func.__doc__)
        # ASSIGN VALUES TO CONFIG
        config['name'] = self.doc_dict['name']
        logger.info("NAME: {}".format(config['name']))
        config['title'] = self.doc_dict['title']
        logger.info("TITLE: {}".format(config['title']))
        config['test'] = self.doc_dict['test']
        logger.info("TEST: {}".format(config['test']))
        config['bug'] = self.doc_dict['bug']
        logger.info("BUG: {}".format(config['bug']))
Example #22
                    except Exception:
                        logging.exception(
                            f"Error occurred while updating bucket '{bucket_name}' policy "
                            f"in '{account_name} / {account_id}'")
                else:
                    logging.debug(
                        f"Skipping '{bucket_name}' "
                        f"({retention_period - no_of_days_issue_created} days before remediation)"
                    )


if __name__ == "__main__":
    module_name = sys.modules[__name__].__loader__.name
    set_logging(level=logging.DEBUG,
                logfile=f"/var/log/hammer/{module_name}.log")
    config = Config()
    add_cw_logging(config.local.log_group,
                   log_stream=module_name,
                   level=logging.DEBUG,
                   region=config.aws.region)
    try:
        si = SingletonInstance(module_name)
    except SingletonInstanceException:
        logging.error(
            f"Another instance of '{module_name}' is already running, quitting"
        )
        sys.exit(1)

    parser = argparse.ArgumentParser()
    parser.add_argument('--batch',
                        action='store_true')
Example #23
def lambda_handler(event, context):
    """ Lambda handler to evaluate public AMI issues """
    set_logging(level=logging.INFO)

    try:
        payload = json.loads(event["Records"][0]["Sns"]["Message"])
        account_id = payload['account_id']
        account_name = payload['account_name']
        # get the last region from the list to process
        region = payload['regions'].pop()
        # if request_id is present in payload then this lambda was called from the API
        request_id = payload.get('request_id', None)
    except Exception:
        logging.exception(f"Failed to parse event\n{event}")
        return

    try:
        config = Config()

        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(
            config.publicAMIs.ddb_table_name)

        account = Account(id=account_id,
                          name=account_name,
                          region=region,
                          role_name=config.aws.role_name_identification)
        if account.session is None:
            return

        logging.debug(f"Checking for Public AMI issues for {account}")

        # existing open issues for account to check if resolved
        open_issues = IssueOperations.get_account_open_issues(
            ddb_table, account_id, PublicAMIIssue)
        # make dictionary for fast search by id
        # and filter by current region
        open_issues = {
            issue.issue_id: issue
            for issue in open_issues if issue.issue_details.region == region
        }
        logging.debug(f"Public AMIs in DDB:\n{open_issues.keys()}")

        checker = PublicAMIChecker(account=account)
        if checker.check():
            for ami in checker.amis:
                logging.debug(f"Checking {ami.id}")
                if ami.public_access:
                    issue = PublicAMIIssue(account_id, ami.id)
                    issue.issue_details.tags = ami.tags
                    issue.issue_details.name = ami.name
                    issue.issue_details.region = region
                    if config.publicAMIs.in_whitelist(account_id, ami.id):
                        issue.status = IssueStatus.Whitelisted
                    else:
                        issue.status = IssueStatus.Open
                    logging.debug(
                        f"Setting {ami.name}/{ami.id} status {issue.status}")
                    IssueOperations.update(ddb_table, issue)
                    # remove issue id from issues_list_from_db (if exists)
                    # as we already checked it
                    open_issues.pop(ami.id, None)

            logging.debug(f"Public AMIs in DDB:\n{open_issues.keys()}")
            # all other unresolved issues in DDB are for removed/remediated keys
            for issue in open_issues.values():
                IssueOperations.set_status_resolved(ddb_table, issue)
        # track the progress of API request to scan specific account/region/feature
        if request_id:
            api_table = main_account.resource("dynamodb").Table(
                config.api.ddb_table_name)
            DDB.track_progress(api_table, request_id)
    except Exception:
        logging.exception(
            f"Failed to check AMI public access for '{account_id} ({account_name})'"
        )
        return

    # push SNS messages until the list with regions to check is empty
    if len(payload['regions']) > 0:
        try:
            Sns.publish(payload["sns_arn"], payload)
        except Exception:
            logging.exception("Failed to chain public AMI checking")

    logging.debug(
        f"Checked AMI public access for '{account_id} ({account_name})'")
Example #24
def lambda_handler(event, context):
    """ Lambda handler to evaluate public EBS snapshots """
    set_logging(level=logging.INFO)

    try:
        payload = json.loads(event["Records"][0]["Sns"]["Message"])
        account_id = payload['account_id']
        account_name = payload['account_name']
        # get the last region from the list to process
        region = payload['regions'].pop()
    except Exception:
        logging.exception(f"Failed to parse event\n{event}")
        return

    try:
        config = Config()

        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(
            config.ebsSnapshot.ddb_table_name)

        account = Account(id=account_id,
                          name=account_name,
                          region=region,
                          role_name=config.aws.role_name_identification)
        if account.session is None:
            return

        logging.debug(f"Checking for public EBS snapshots in {account}")

        # existing open issues for account to check if resolved
        open_issues = IssueOperations.get_account_open_issues(
            ddb_table, account_id, EBSPublicSnapshotIssue)
        # make dictionary for fast search by id
        # and filter by current region
        open_issues = {
            issue.issue_id: issue
            for issue in open_issues if issue.issue_details.region == region
        }
        logging.debug(f"Public EBS snapshots in DDB:\n{open_issues.keys()}")

        checker = EBSPublicSnapshotsChecker(account=account)
        if checker.check():
            for snapshot in checker.snapshots:
                if snapshot.public:
                    issue = EBSPublicSnapshotIssue(account_id, snapshot.id)
                    issue.issue_details.region = snapshot.account.region
                    issue.issue_details.volume_id = snapshot.volume_id
                    issue.issue_details.tags = snapshot.tags
                    if config.ebsSnapshot.in_whitelist(account_id,
                                                       snapshot.id):
                        issue.status = IssueStatus.Whitelisted
                    else:
                        issue.status = IssueStatus.Open
                    logging.debug(
                        f"Setting {snapshot.id} status {issue.status}")
                    IssueOperations.update(ddb_table, issue)
                    # remove issue id from issues_list_from_db (if exists)
                    # as we already checked it
                    open_issues.pop(snapshot.id, None)

            logging.debug(
                f"Public EBS snapshots in DDB:\n{open_issues.keys()}")
            # all other unresolved issues in DDB are for removed/remediated EBS snapshots
            for issue in open_issues.values():
                IssueOperations.set_status_resolved(ddb_table, issue)
    except Exception:
        logging.exception(
            f"Failed to check public EBS snapshots in '{region}' for '{account_id} ({account_name})'"
        )

    # push SNS messages until the list with regions to check is empty
    if len(payload['regions']) > 0:
        try:
            Sns.publish(payload["sns_arn"], payload)
        except Exception:
            logging.exception("Failed to chain public EBS snapshots checking")

    logging.debug(
        f"Checked public EBS snapshots in '{region}' for '{account_id} ({account_name})'"
    )
Example #25
import logging
import sys


from library.logger import set_logging, add_cw_logging
from library.config import Config
from slackbot import settings
from slackbot.bot import Bot


def main():
    bot = Bot()
    bot.run()


if __name__ == "__main__":
    module_name = sys.modules[__name__].__loader__.name
    set_logging(level=logging.WARNING, logfile=f"/var/log/hammer/{module_name}.log")
    settings.config = Config()
    if not settings.config.slack.enabled:
        sys.exit(0)

    settings.API_TOKEN = settings.config.slack.api_token
    settings.PLUGINS = ['bot.commands']
    add_cw_logging(settings.config.local.log_group,
                   log_stream=module_name,
                   level=logging.WARNING,
                   region=settings.config.aws.region)
    main()
Example #26
    def __init__(self):
        self.config = Config()
Example #27
def lambda_handler(event, context):
    """ Lambda handler to evaluate insecure services """
    set_logging(level=logging.INFO)

    try:
        payload = json.loads(event["Records"][0]["Sns"]["Message"])
        account_id = payload['account_id']
        account_name = payload['account_name']
        # get the last region from the list to process
        region = payload['regions'].pop()
    except Exception:
        logging.exception(f"Failed to parse event\n{event}")
        return

    try:
        config = Config()

        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(
            config.sg.ddb_table_name)

        account = Account(id=account_id,
                          name=account_name,
                          region=region,
                          role_name=config.aws.role_name_identification)
        if account.session is None:
            return

        logging.debug(f"Checking for insecure services in {account}")

        # existing open issues for account to check if resolved
        open_issues = IssueOperations.get_account_open_issues(
            ddb_table, account_id, SecurityGroupIssue)
        # make dictionary for fast search by id
        # and filter by current region
        open_issues = {
            issue.issue_id: issue
            for issue in open_issues if issue.issue_details.region == region
        }
        logging.debug(f"Security groups in DDB:\n{open_issues.keys()}")

        checker = SecurityGroupsChecker(
            account=account, restricted_ports=config.sg.restricted_ports)
        if checker.check():
            for sg in checker.groups:
                logging.debug(f"Checking {sg.name} ({sg.id})")
                if not sg.restricted:
                    # TODO: move instances detection for security group from reporting to identification
                    #ec2_instances = EC2Operations.get_instance_details_of_sg_associated(account.client("ec2"), sg.id)
                    #logging.debug(f"associated ec2 instances: {ec2_instances}")
                    issue = SecurityGroupIssue(account_id, sg.id)
                    issue.issue_details.name = sg.name
                    issue.issue_details.region = sg.account.region
                    issue.issue_details.tags = sg.tags
                    issue.issue_details.status = sg.status.value
                    for perm in sg.permissions:
                        for ip_range in perm.ip_ranges:
                            if not ip_range.restricted:
                                issue.add_perm(perm.protocol, perm.from_port,
                                               perm.to_port, ip_range.cidr,
                                               ip_range.status)
                    if config.sg.in_whitelist(
                            account_id, sg.name) or config.sg.in_whitelist(
                                account_id, sg.id):
                        issue.status = IssueStatus.Whitelisted
                    else:
                        issue.status = IssueStatus.Open
                    logging.debug(f"Setting {sg.id} status {issue.status}")
                    IssueOperations.update(ddb_table, issue)
                    # remove issue id from issues_list_from_db (if exists)
                    # as we already checked it
                    open_issues.pop(sg.id, None)

            logging.debug(f"Security groups in DDB:\n{open_issues.keys()}")
            # all other unresolved issues in DDB are for removed/remediated security groups
            for issue in open_issues.values():
                IssueOperations.set_status_resolved(ddb_table, issue)
    except Exception:
        logging.exception(
            f"Failed to check insecure services in '{region}' for '{account_id} ({account_name})'"
        )

    # push SNS messages until the list with regions to check is empty
    if len(payload['regions']) > 0:
        try:
            Sns.publish(payload["sns_arn"], payload)
        except Exception:
            logging.exception("Failed to chain insecure services checking")

    logging.debug(
        f"Checked insecure services in '{region}' for '{account_id} ({account_name})'"
    )
Example #28
def create_trainer(
    config_dict: Dict[str, Any],
    output: Path,
):
    # config
    config = Config.from_dict(config_dict)
    config.add_git_info()

    output.mkdir(exist_ok=True, parents=True)
    with (output / "config.yaml").open(mode="w") as f:
        yaml.safe_dump(config.to_dict(), f)

    # model
    networks = create_network(config.network)
    model = Model(model_config=config.model, networks=networks)
    if config.train.weight_initializer is not None:
        init_weights(model, name=config.train.weight_initializer)

    device = torch.device("cuda") if config.train.use_gpu else torch.device(
        "cpu")
    model.to(device)

    # dataset
    _create_iterator = partial(
        create_iterator,
        batch_size=config.train.batch_size,
        eval_batch_size=config.train.eval_batch_size,
        num_processes=config.train.num_processes,
        use_multithread=config.train.use_multithread,
    )

    datasets = create_dataset(config.dataset)
    train_iter = _create_iterator(datasets["train"], for_train=True)
    test_iter = _create_iterator(datasets["test"], for_train=False)
    eval_iter = _create_iterator(datasets["eval"], for_train=False)

    warnings.simplefilter("error", MultiprocessIterator.TimeoutWarning)

    # optimizer
    optimizer = make_optimizer(config_dict=config.train.optimizer, model=model)

    # updater
    if not config.train.use_amp:
        updater = StandardUpdater(
            iterator=train_iter,
            optimizer=optimizer,
            model=model,
            device=device,
        )
    else:
        updater = AmpUpdater(
            iterator=train_iter,
            optimizer=optimizer,
            model=model,
            device=device,
        )

    # trainer
    trigger_log = (config.train.log_iteration, "iteration")
    trigger_eval = (config.train.eval_iteration, "iteration")
    trigger_snapshot = (config.train.snapshot_iteration, "iteration")
    trigger_stop = ((config.train.stop_iteration, "iteration")
                    if config.train.stop_iteration is not None else None)

    trainer = Trainer(updater, stop_trigger=trigger_stop, out=output)

    ext = extensions.Evaluator(test_iter, model, device=device)
    trainer.extend(ext, name="test", trigger=trigger_log)

    if config.train.stop_iteration is not None:
        saving_model_num = int(config.train.stop_iteration /
                               config.train.eval_iteration / 10)
    else:
        saving_model_num = 10
    ext = extensions.snapshot_object(
        networks.predictor,
        filename="predictor_{.updater.iteration}.pth",
        n_retains=saving_model_num,
    )
    trainer.extend(
        ext,
        trigger=LowValueTrigger("test/main/loss", trigger=trigger_eval),
    )

    trainer.extend(extensions.FailOnNonNumber(), trigger=trigger_log)
    trainer.extend(extensions.observe_lr(), trigger=trigger_log)
    trainer.extend(extensions.LogReport(trigger=trigger_log))
    trainer.extend(
        extensions.PrintReport(["iteration", "main/loss", "test/main/loss"]),
        trigger=trigger_log,
    )

    ext = TensorboardReport(writer=SummaryWriter(Path(output)))
    trainer.extend(ext, trigger=trigger_log)

    if config.project.category is not None:
        ext = WandbReport(
            config_dict=config.to_dict(),
            project_category=config.project.category,
            project_name=config.project.name,
            output_dir=output.joinpath("wandb"),
        )
        trainer.extend(ext, trigger=trigger_log)

    (output / "struct.txt").write_text(repr(model))

    if trigger_stop is not None:
        trainer.extend(extensions.ProgressBar(trigger_stop))

    ext = extensions.snapshot_object(
        trainer,
        filename="trainer_{.updater.iteration}.pth",
        n_retains=1,
        autoload=True,
    )
    trainer.extend(ext, trigger=trigger_snapshot)

    return trainer
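A plausible entry point, assuming a YAML config on disk (the path is hypothetical); Trainer.run() is the standard Chainer-style training loop:

import yaml
from pathlib import Path

config_dict = yaml.safe_load(Path("config.yaml").read_text())  # hypothetical path
trainer = create_trainer(config_dict, output=Path("out/experiment"))
trainer.run()  # runs until trigger_stop fires (or indefinitely if stop_iteration is None)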
Example #29
    def __init__(self, config=None):
        self.config = Config() if config is None else config
        self.sc = SlackClient(self.config.slack.api_token)
        self.slackUser = "******"
Example #30
def lambda_handler(event, context):
    """ Lambda handler to evaluate S3 bucket ACLs """
    set_logging(level=logging.INFO)

    try:
        payload = json.loads(event["Records"][0]["Sns"]["Message"])
        account_id = payload['account_id']
        account_name = payload['account_name']
        # if request_id is present in payload then this lambda was called from the API
        request_id = payload.get('request_id', None)
    except Exception:
        logging.exception(f"Failed to parse event\n{event}")
        return

    try:
        config = Config()

        main_account = Account(region=config.aws.region)
        ddb_table = main_account.resource("dynamodb").Table(
            config.s3acl.ddb_table_name)

        account = Account(id=account_id,
                          name=account_name,
                          role_name=config.aws.role_name_identification)
        if account.session is None:
            return

        logging.debug(f"Checking for public S3 ACLs in {account}")

        # existing open issues for account to check if resolved
        open_issues = IssueOperations.get_account_open_issues(
            ddb_table, account_id, S3AclIssue)
        # make dictionary for fast search by id
        # (S3 buckets are not region-scoped, so no region filtering here)
        open_issues = {issue.issue_id: issue for issue in open_issues}
        logging.debug(f"S3 in DDB:\n{open_issues.keys()}")

        checker = S3BucketsAclChecker(account=account)
        if not checker.check():
            return

        for bucket in checker.buckets:
            logging.debug(f"Checking {bucket.name}")
            if bucket.public:
                issue = S3AclIssue(account_id, bucket.name)
                issue.issue_details.owner = bucket.owner
                issue.issue_details.public_acls = bucket.get_public_acls()
                issue.issue_details.tags = bucket.tags
                if config.s3acl.in_whitelist(account_id, bucket.name):
                    issue.status = IssueStatus.Whitelisted
                else:
                    issue.status = IssueStatus.Open
                logging.debug(f"Setting {bucket.name} status {issue.status}")
                IssueOperations.update(ddb_table, issue)
                # remove issue id from issues_list_from_db (if exists)
                # as we already checked it
                open_issues.pop(bucket.name, None)

        logging.debug(f"S3 in DDB:\n{open_issues.keys()}")
        # all other unresolved issues in DDB are for removed/remediated buckets
        for issue in open_issues.values():
            IssueOperations.set_status_resolved(ddb_table, issue)
        if request_id:
            api_table = main_account.resource("dynamodb").Table(
                config.api.ddb_table_name)
            DDB.track_progress(api_table, request_id)
    except Exception:
        logging.exception(
            f"Failed to check s3 acls for '{account_id} ({account_name})'")
        return

    logging.debug(f"Checked s3 acls for '{account_id} ({account_name})'")