Example No. 1
    def make_data(self):
        """
        """
        if self.does_table_exist():
            shlog.normal("untagged_list already collected")
            return

        shlog.normal("beginning to make {} data".format(self.name))
        # Prepare table for untagged object list
        sql = """CREATE TABLE untagged_list
                      (
                         service TEXT, request TEXT, json TEXT
                       )
                      """
        shlog.verbose(sql)
        self.q.q(sql)

        # analyze data from all_json
        sql = """SELECT resource_name, id, record as json
                    FROM all_json a
                    WHERE a.record like '%"Tags": []%' -- empty tags
                      OR a.record like '%"TagSet": []%' -- empty tagset
                      OR (a.record not like '%Tags":%' and a.record not like '%TagSet":%') -- no tag dicts
                      """
        df = self.q.q_to_df(sql)

        # extract just the relevant evidence from each JSON record

        df['json'] = df['json'].apply(lambda x: self.extract_evidence(x))

        self.q.df_to_db(self.table_name, df)
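
A note on extract_evidence: it is defined elsewhere in this class. A minimal sketch of what it might do, assuming it only trims each JSON record down to a few identifying fields (the key list here is a guess):

import json

def extract_evidence(record_json, keys=("Name", "InstanceId", "ARN", "Tags", "TagSet")):
    """Keep only the fields useful for identifying an untagged resource."""
    record = json.loads(record_json)
    evidence = {k: record[k] for k in keys if k in record}
    return json.dumps(evidence)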
Example No. 2
    def make_data(self):
        """
        Make a table called TAGS based on tagging data.
        This collection of data is based on the resourcetaggingapi

        If the tags table already exists, we skip collection; re-collecting
        the data would result in duplicate rows.
        """
        if self.does_table_exist():
            shlog.normal("tags data already collected")
            return

        shlog.normal("beginning to make {} data".format(self.name))
        # Make a flattened table for the tag data.
        # one tag, value pair in each record.
        sql = "create table tags (short_arn text, tag text, value text, arn text)"
        shlog.verbose(sql)
        self.q.q(sql)

        for page, _ in self._pages_all_regions('resourcegroupstaggingapi',
                                               'get_resources'):

            ResourceMappingList = page['ResourceTagMappingList']
            for d in ResourceMappingList:
                full_arn = d['ResourceARN']
                short_arn = aws_utils.shortened_arn(d['ResourceARN'])
                for kvdict in d['Tags']:
                    # parameterized insert avoids quoting problems in tag values
                    sql = 'insert into tags values (?, ?, ?, ?)'
                    self.q.executemany(sql, [(short_arn, kvdict["Key"],
                                              kvdict["Value"], full_arn)])
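
The _pages_all_regions helper used above is not shown in these examples. A sketch of its likely shape, assuming it walks every region with a boto3 paginator (both the shape and the session argument are assumptions):

import boto3

def pages_all_regions(session, service, method, parameter=None):
    """Yield (page, region) for a paginated boto3 call in every region."""
    regions = [r['RegionName']
               for r in session.client('ec2').describe_regions()['Regions']]
    for region in regions:
        client = session.client(service, region_name=region)
        paginator = client.get_paginator(method)
        for page in paginator.paginate(**(parameter or {})):
            yield page, region

Called as pages_all_regions(boto3.Session(profile_name='some-profile'), 'resourcegroupstaggingapi', 'get_resources'), this would feed the loop above one page at a time; 'some-profile' is a placeholder. The real helper must also cope with non-pageable operations (Example No. 8 uses it with describe_certificate), e.g. by checking client.can_paginate(method) first.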
Example No. 3
def main(args):
    """Visualize IAM entities and the policies attached to them.
    x_report needs to be run first."""
    from security_scripts.information.lib import vanilla_utils
    from security_scripts.information.lib import L1_L2
    from security_scripts.information.lib import Liam

    # decode full path to dbfile so imports from other directories don't get confused
    if args.dbfile != ':memory:':
        args.dbfile = os.path.abspath(args.dbfile)

    shlog.verbose(args)
    shlog.verbose("only tests matching %s will be considered", args.only)
    q = vanilla_utils.Q(args.dbfile, args.flush)

    # switch in custom to_self and to_others
    L1_replacer()

    # run L1_L2
    L1_L2.Acquire(args, "L1_L2", q)

    # cluster by type (label minus name minus \n)
    Liam.Acquire(args, "Liam", q)

    exit(0)
Example No. 4
def L1_replacer():
    from security_scripts.information import L1_preport
    l1dir = os.getcwd() + "/report_files/L1/"
    shlog.verbose("Overwriting L1 files")
    with open(l1dir + 'to_self.json', 'w') as file:
        file.write(L1_preport.t_self)
    with open(l1dir + 'to_others.json', 'w') as file:
        file.write(L1_preport.t_others)
Example No. 5
    def make_data(self):
        """
        """
        if self.does_table_exist():
            shlog.normal("tag_counter already collected")
            return

        shlog.normal("beginning to make {} data".format(self.name))
        # Prepare table for tag count analysis output
        sql = """CREATE TABLE tag_counter
                      (
                         service TEXT, request TEXT, total TEXT, untagged TEXT, tagged TEXT
                       )
                      """
        shlog.verbose(sql)
        self.q.q(sql)

        # analyze data from all_json
        sql = """WITH untagged(resource,id, cnt) AS (
                    SELECT resource_name, id, count(record) as cnt
                    FROM all_json a
                    WHERE a.record like '%"Tags": []%' -- empty tags
                      OR a.record like '%"TagSet": []%' -- empty tagset
                      OR (a.record not like '%Tags":%' and a.record not like '%TagSet":%') -- no tag dicts
                    GROUP BY resource_name, id
                ),
                     tagged(resource, id, cnt) AS (
                    SELECT resource_name, id, count(record) as cnt
                    FROM all_json a
                    WHERE a.record like '%"Tags": [{%' -- non-empty tags
                      OR a.record like '%"TagSet": [{%' -- non-empty tagset
                    GROUP BY resource_name, id
                     ),
                
                     all_count(resource, id, cnt) AS (
                    SELECT resource_name, id, count(record) as cnt
                    FROM all_json a
                    GROUP BY resource_name, id
                     )
                
                SELECT a.resource, a.id, a.cnt as total,
                       ifnull(u.cnt, 0) as untagged,
                       ifnull(t.cnt,0) as tagged
                    FROM all_count a
                    LEFT JOIN untagged u on u.resource = a.resource and u.id = a.id
                    LEFT JOIN tagged t on t.resource = a.resource and t.id = a.id
                    GROUP BY a.resource, a.id
        """
        df = self.q.q_to_df(sql)

        self.q.df_to_db(self.table_name, df)
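
A toy run of the same counting idea, on a made-up three-row all_json table (data invented for illustration):

import sqlite3

con = sqlite3.connect(':memory:')
con.execute("CREATE TABLE all_json (resource_name TEXT, id TEXT, record TEXT)")
con.executemany("INSERT INTO all_json VALUES (?, ?, ?)", [
    ('s3', 'list_buckets', '{"Tags": []}'),              # empty tags -> untagged
    ('s3', 'list_buckets', '{"Tags": [{"Key": "x"}]}'),  # non-empty -> tagged
    ('ec2', 'describe', '{"nothing": "here"}'),          # no tag dicts -> untagged
])
for row in con.execute("""
        SELECT resource_name, count(*) FROM all_json
        WHERE record LIKE '%"Tags": []%'
           OR (record NOT LIKE '%Tags":%' AND record NOT LIKE '%TagSet":%')
        GROUP BY resource_name"""):
    print(row)  # -> ('ec2', 1) and ('s3', 1): one untagged resource each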
Example No. 6
    def make_data(self):
        """
        Make a table called TAGS based on tagging data.
        This collection of data is based on the resourcetaggingapi

        If the tags table exists, then we take it data collection
        would result in duplicate rows. 
        """
        if self.does_table_exist():
            shlog.normal("secretsdata already collected")
            return

        shlog.normal("beginning to make {} data".format(self.name))
        # Make a flattened table for the tag data.
        # one tag, value pair in each record.
        sql = """CREATE TABLE secrets
              (
                 short_arn TEXT, region TEXT, name TEXT, description TEXT,
                 lastchangeddate TEXT, arn TEXT, record JSON
               )
              """
        shlog.verbose(sql)
        self.q.q(sql)

        # Get the secrets for each region.
        # Accommodate that the boto3 API can return data in pages.
        for page, _ in self._pages_all_regions('secretsmanager',
                                               'list_secrets'):

            for secret in page['SecretList']:
                arn = secret['ARN']
                short_arn = aws_utils.shortened_arn(arn)
                region = secret["ARN"].split(':')[3]
                name = secret['Name']
                description = secret['Description']
                lastchangeddate = datetime.datetime.isoformat(
                    secret['LastChangedDate'])
                record = secret
                record["FlatTags"] = vanilla_utils.flatten_tags(secret["Tags"])
                record = self._json_clean_dumps(record)
                sql = """
                       INSERT INTO secrets VALUES (?, ?, ?, ?, ?, ?, ?)
                         """
                row = (short_arn, region, name, description, lastchangeddate,
                       arn, record)
                shlog.verbose(sql)
                self.q.executemany(sql, [row])
                # populate the all_json table
                self._insert_all_json("secrets", short_arn, record)
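
vanilla_utils.flatten_tags is used above but not shown. A plausible sketch, assuming it just collapses the AWS-style tag list into a plain dict:

def flatten_tags(tags):
    """Collapse [{'Key': k, 'Value': v}, ...] into {k: v, ...}."""
    return {t['Key']: t['Value'] for t in tags}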
Example No. 7
def main(args):
    """Run tag, s3, secret, certificate, repo inventory reports"""
    from security_scripts.information.lib import vanilla_utils
    from security_scripts.information.lib import tags
    from security_scripts.information.lib import s3
    from security_scripts.information.lib import secrets
    from security_scripts.information.lib import certificates
    from security_scripts.information.lib import repos
    from security_scripts.information.lib import load_balancer
    from security_scripts.information.lib import instances
    from security_scripts.information.lib import assets

    # decode full path to dbfile so imports from other directories don't get confused
    if args.dbfile != ':memory:':
        args.dbfile = os.path.abspath(args.dbfile)

    shlog.verbose(args)
    shlog.verbose("only tests matching %s will be considered", (args.only))
    q = vanilla_utils.Q(args.dbfile, args.flush)
    tag_acquire = tags.Acquire(args, "TAGS", q)
    repos_acquire = repos.Acquire(args, "repos", q)
    s3_acquire = s3.Acquire(args, "s3", q)
    secret_acquire = secrets.Acquire(args, "secrets", q)
    certificate_acquire = certificates.Acquire(args, "certificates", q)
    load_balancer_acquire = load_balancer.Acquire(args, "load_balancer", q)
    instances_acquire = instances.Acquire(args, "instances", q)
    # at this point data is in the relational DB
    if args.dump:
        tag_acquire.print_data()
        s3_acquire.print_data()
        secret_acquire.print_data()
        certificate_acquire.print_data()
        load_balancer_acquire.print_data()
        repos_acquire.print_data()
        # instances_acquire.print_data()
        exit()

    # reporting actions are driven by instantiating the classes.
    tag_reports = tags.Report(args, "Tagging Rule Check", q)
    s3_reports = s3.Report(args, "s3", q)
    secret_reports = secrets.Report(args, "secrets", q)
    cert_reports = certificates.Report(args, "Certificates", q)
    load_balancer_reports = load_balancer.Report(args, "load_balancers", q)
    repo_reports = repos.Report(args, "repos", q)
    instances_reports = instances.Report(args, "Tagging Rule Check", q)

    assets.Acquire(args, "assets", q)
    assets.Report(args, "assets", q)
Example No. 8
    def make_data(self):
        """
        Make a table called CERTIFICATES based on certificates held in AWS
        """
        if self.does_table_exist():
            shlog.normal("{} data already collected".format(self.table_name))
            return

        shlog.normal("beginning to make {} data".format(self.table_name))
        # Make a table for the certificate data,
        # one certificate in each record.
        sql = """CREATE TABLE certificates (asset text, domain text,
                            arn text, short_arn text, inuseby text, hash text, record json)"""
        shlog.verbose(sql)
        self.q.q(sql)

        # Get the certificates for each region.
        # Accommodate that the boto3 API can return data in pages.
        for page, _ in self._pages_all_regions('acm', 'list_certificates'):
            for certificate in page['CertificateSummaryList']:
                arn = certificate["CertificateArn"]
                domain = certificate["DomainName"]
                short_arn = aws_utils.shortened_arn(arn)
                try:
                    for detail_page, _ in self._pages_all_regions('acm', 'describe_certificate',
                                                                  parameter={'CertificateArn': arn}):
                        if 'Nothing' not in detail_page.keys():
                            record = detail_page["Certificate"]
                            inuseby = record["InUseBy"]
                            # shorten each consumer ARN without shadowing the outer arn
                            inuseby = [aws_utils.shortened_arn(a) for a in inuseby]
                            inuseby = ",".join(inuseby)
                            asset = "Cert:{} for use by {}".format(domain, inuseby)
                            hash = vanilla_utils.tiny_hash(arn)
                            record = self._json_clean_dumps(record)
                            sql = "INSERT INTO certificates VALUES (?, ?, ?, ?, ?, ?, ?)"
                            params = (asset, domain, arn, short_arn, inuseby, hash, record)
                            self.q.executemany(sql, [params])
                            # populate the all_json table
                            self._insert_all_json("certificate", short_arn, record)
                except Exception as e:
                    if "ResourceNotFoundException" in str(e):
                        shlog.normal("certificate {} not found".format(short_arn))
                    else:
                        # surface unexpected errors instead of swallowing them
                        raise
Example No. 9
def ec2stop(args, dryrun=False):
    """
    Make a request to stop all ec2 instances
    :return:
    """
    from botocore.exceptions import ClientError
    from security_scripts.information.lib import aws_utils as au  # only works in plugin and IDE
    args.session = boto3.Session(profile_name=args.profile)
    regions = au.decribe_regions_df(args)  # use for deployment
    # regions = {'RegionName':['us-east-2']} # test mode
    for region in regions['RegionName']:
        shlog.normal('Stopping region ' + region)
        # init connection to the region through the profile session
        client = args.session.client('ec2', region_name=region)
        response = client.describe_instances()['Reservations']
        # go through instance ids (a reservation may hold several instances)
        for reservation in response:
            for inst in reservation['Instances']:
                instance = inst['InstanceId']
                # ...allow termination...
                shlog.verbose('Allowing API termination for instance ' + instance +
                              ' in region ' + region)
                resp = client.modify_instance_attribute(
                    InstanceId=instance, DisableApiTermination={'Value': False})
                shlog.debug(resp)
                # ...and perform the halt
                shlog.normal('Stopping instance ' + instance + ' in region ' +
                             region)
                try:
                    resp = client.stop_instances(InstanceIds=[instance],
                                                 DryRun=dryrun,
                                                 Force=True)
                except ClientError as ce:
                    if dryrun:
                        # a client error is expected when simulating
                        shlog.normal('Stop simulation succeeded for instance ' +
                                     instance)
                        shlog.verbose('Success code: ' + str(ce))
                    else:
                        # re-raise real failures
                        raise
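
A dry run exercises the permission checks without stopping anything. A hypothetical invocation (the profile name is made up):

import argparse

args = argparse.Namespace(profile='my-admin-profile')
ec2stop(args, dryrun=True)  # each stop attempt raises DryRunOperation, logged as success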
Example No. 10
def env_control():
    import platform
    import os
    var = 'SCIMMA_SECURITY_CFG'
    val = os.environ.get(var)
    if val:
        shlog.verbose('Reading custom config file location from $' + var +
                      ' as ' + val)
        return val
    else:
        if platform.system() in ['Linux', 'Darwin']:
            # *nixes
            val = '~/.scimma-security.cfg'
            with open(os.path.expanduser("~/.bash_profile"), "a") as outfile:
                # leading newline guards against appending onto the file's last line
                outfile.write("\nexport {0}={1}\n".format(var, val))
            with open(os.path.expanduser("~/.zshenv"), "a") as outfile:
                outfile.write("\nexport {0}={1}\n".format(var, val))
            shlog.verbose('$' + var +
                          ' written to ~/.bash_profile and ~/.zshenv as ' +
                          val)
        else:
            # windows
            val = '$HOME\\scimma-security.cfg'
            os.system('SETX {0} "{1}" /M'.format(var, val))
            shlog.verbose('$' + var +
                          ' written as system variable with value ' + val)
        return val
Example No. 11
    def make_data(self):
        """
        Make a table called ec2 based on tagging data.
        This collection of data is based on the resourcetaggingapi

        If the tags table exists, then we take it data collection
        would result in duplicate rows. 
        """
        if self.does_table_exist():
            shlog.normal("ec2 data already collected")
            return

        shlog.normal("beginning to make {} data".format(self.name)) 
        # Make a flattened table for the tag data.
        # one tag, value pair in each record.
        sql = """CREATE TABLE ec2 (
                 instance TEXT, vpc TEXT, subnet TEXT, publicdnsname TEXT,
                 privatednsame TEXT, privateipaddress TEXT, keyname TEXT,
                record JSON)"""
        shlog.verbose(sql)
        self.q.q(sql)
        # Get the instances for each region.
        # Accommodate that the boto3 API can return data in pages.
        for page, _ in self._pages_all_regions('ec2', 'describe_instances'):
            reservations = page["Reservations"]
            for r in reservations:
                for i in r["Instances"]:
                    record = self._json_clean_dumps(i)
                    instance = i.get("InstanceId", "")
                    vpc = i.get("VpcId", "")
                    subnet = i.get("SubnetId", "")
                    publicdnsname = i.get("PublicDnsName", "")
                    privatednsame = i.get('PrivateDnsName', "")
                    privateipaddress = i.get("PrivateIpAddress", "")
                    keyname = i.get("KeyName", "")
                    sql = '''INSERT INTO ec2 VALUES (?, ?, ?, ?, ?, ?, ?, ?)'''
                    row = (instance, vpc, subnet, publicdnsname, privatednsame,
                           privateipaddress, keyname, record)
                    self.q.executemany(sql, [row])
                    # populate the all_json table
                    self._insert_all_json("ec2", instance, record)
Example No. 12
    def make_data(self):
        """
        Make a table called load_balancer based on tagging data.
        This collection of data is based on the resourcetaggingapi

        If the tags table exists, then we take it data collection
        would result in duplicate rows. 
        """
        if self.does_table_exist():
            shlog.normal("load_balancers already collected")
            return

        shlog.normal("beginning to make {} data".format(self.name))
        # Make a table for the load balancer data,
        # one balancer in each record.
        sql = """CREATE TABLE load_balancers
              (
                 name TEXT, vpc TEXT, record JSON
               )
              """
        shlog.verbose(sql)
        self.q.q(sql)

        # classic load balancers
        for page, _ in self._pages_all_regions('elb',
                                               'describe_load_balancers'):
            for elb in page['LoadBalancerDescriptions']:
                name = elb['LoadBalancerName']
                vpc = elb['VPCId']
                record = self._json_clean_dumps(elb)
                sql = "INSERT INTO load_balancers VALUES (?, ?, ?)"
                row = (name, vpc, record)
                shlog.verbose(sql)
                self.q.executemany(sql, [row])

        # application balancers
        for page, _ in self._pages_all_regions('elbv2',
                                               'describe_load_balancers'):
            for elb in page['LoadBalancers']:
                name = elb['LoadBalancerName']
                vpc = elb['VpcId']
                lb_type = elb['Type']  # collected but not stored in the table
                record = self._json_clean_dumps(elb)
                sql = "INSERT INTO load_balancers VALUES (?, ?, ?)"
                row = (name, vpc, record)
                shlog.verbose(sql)
                self.q.executemany(sql, [row])
Example No. 13
    def make_data(self):
        """
        Make a table called TAGS based on tagging data.
        This collection of data is based on the resourcetaggingapi

        If the tags table exists, then we take it data collection
        would result in duplicate rows.
        """
        if self.does_table_exist():
            shlog.normal("repos  already collected")
            return

        if self.netrc_has_credentials():
            shlog.normal('using api.github.com login+token from ~/.netrc')
        else:
            shlog.normal('api.github.com credentials not found in ~/.netrc')
            exit(0)

        shlog.normal("beginning to make {} data".format(self.name))
        # Make a table for the repo data,
        # one repo in each record.
        sql = "CREATE TABLE repos (name TEXT, description TEXT, url TEXT, who TEXT, " \
              "can_pull text, can_push text, admins text, hash TEXT, record JSON)"
        shlog.verbose(sql)
        self.q.q(sql)

        #n.b. beta interface when this was coded
        cmd = 'curl -n -H "Accept: application/vnd.github.inertia-preview+json" ' \
              '   https://api.github.com/orgs/scimma/repos'
        import subprocess
        # n.b. check=True will throw an exception if curl exits non-zero.
        # The risk is that we still get valid output that is really a "404" page.
        shlog.verbose(cmd)
        result = subprocess.run(cmd,
                                text=True,
                                capture_output=True,
                                shell=True,
                                check=True)
        stdout = result.stdout
        stderr = result.stderr
        if len(result.stdout) < 200:
            shlog.verbose("github curl error: stdout:{} stderr:{}".format(
                stdout, stderr))
            exit(1)
        result = json.loads(result.stdout)
        for repo in result:
            name = repo['full_name']
            description = "{}(Private={})".format(repo['description'],
                                                  repo['private'])
            where = repo['url']  #url
            who = repo['owner']['login']  # is really the account.
            hash = vanilla_utils.tiny_hash(name)
            record = json.dumps(repo)  # store this repo's record, not the whole listing

            # get user list
            collaborators_url = repo['collaborators_url'].split('{')[0]
            cmd = 'curl -n ' + collaborators_url
            collab_run = subprocess.run(cmd,
                                        text=True,
                                        capture_output=True,
                                        shell=True,
                                        check=True)
            collaborators = json.loads(collab_run.stdout)
            try:
                # attempt to sort the users by permission level
                members = {
                    'can_pull': self.get_members(collaborators, 'pull'),
                    'can_push': self.get_members(collaborators, 'push'),
                    'admins': self.get_members(collaborators, 'admin')
                }
            except Exception:
                # repo is unreadable: the API returned a message object rather
                # than a collaborator list, so record empty membership
                shlog.verbose('could not sort collaborators for {}'.format(name))
                members = {'can_pull': '', 'can_push': '', 'admins': ''}
            sql = "INSERT INTO repos VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)"
            self.q.executemany(
                sql, [(name, description, where, who, members['can_pull'],
                       members['can_push'], members['admins'], hash, record)])
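
vanilla_utils.tiny_hash is used here and in the certificate report. A minimal sketch, assuming all that is needed is a short, stable digest (the length is a guess):

import hashlib

def tiny_hash(text):
    """Short, stable identifier derived from a name or ARN."""
    return hashlib.sha256(text.encode('utf-8')).hexdigest()[:8]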
Example No. 14
    def make_data(self):
        """
        Make a table called TAGS based on tagging data.
        This collection of data is based on the resourcetaggingapi

        If the tags table exists, then we take it data collection
        would result in duplicate rows. 
        """
        if self.does_table_exist():
            shlog.normal("tags data already collected")
            return

        shlog.normal("beginning to make {} data".format(self.name))
        # Make a table for the bucket data,
        # one bucket in each record.
        sql = "create table s3 (asset text, bucket text, arn text, region text, npolicies text, ngrants text, grants json, policy_status text, bucket_policy JSON, record JSON)"
        shlog.verbose(sql)
        self.q.q(sql)

        # get buckets from local storage
        response = self.json_from_file(self.s_path)[0]

        # still need to init
        session = boto3.Session(profile_name=self.args.profile)
        client = session.client('s3')

        # the rest is untouched - this module still makes boto3 requests.
        # Should we keep this info locally? Should we pull it with L0A?
        # Ignore the 'ResponseMetadata'; there seems to be no pagination context in there.
        buckets = response["Buckets"]
        for bucket in buckets:
            name = bucket["Name"]
            # arn can be computed from aws partition (e.g aws, aws-us-gov) and bucket name
            arn = "arn:{}:s3:::{}".format("aws", name)
            region = client.head_bucket(
                Bucket=name
            )['ResponseMetadata']['HTTPHeaders']['x-amz-bucket-region']
            grants = client.get_bucket_acl(Bucket=name)["Grants"]
            ngrants = len(grants)
            asset = "aws bucket:{}".format(name)

            try:
                result = client.get_bucket_policy_status(Bucket=name)
                policy_status = result["PolicyStatus"]
                bucket_policy = client.get_bucket_policy(Bucket=name)["Policy"]
                npolicies = len(bucket_policy)
            except botocore.exceptions.ClientError:
                policy_status = {"IsPublic": False}
                bucket_policy = []
                npolicies = 0
            except:
                raise
            record = bucket
            record['BucketPolicy'] = bucket_policy
            record['PolicyStatus'] = policy_status
            record['Region'] = region
            record = self._json_clean_dumps(record)

            sql = '''INSERT INTO s3 VALUES (?,?,?,?,?,?,?,?,?,?)'''
            npolicies = "{}".format(npolicies)
            ngrants = "{}".format(ngrants)

            row = (asset, name, arn, region, npolicies, ngrants,
                   json.dumps(grants), json.dumps(policy_status),
                   json.dumps(bucket_policy), record)
            self.q.executemany(sql, [row])
            # populate the all_json table
            self._insert_all_json("s3", name, record)
Example No. 15
    def make_data(self):
        """
        MAKE DATA FOR GIT REPOS.
        """
        if self.does_table_exist():
            shlog.normal("Assets  already collected")
            return

        shlog.normal("beginning to make {} data".format(self.name))
        # Master table where other reports deposit
        # the asset data they discover.
        sql = "CREATE TABLE {} ({} TEXT)".format(
            self.table_name, " TEXT, ".join(self.asset_columns))
        shlog.vverbose(sql)
        self.q.q(sql)

        # get all the tables.
        sql = """
           SELECT name FROM sqlite_master
           WHERE type IN ('table','view')
           AND name NOT LIKE 'sqlite_%'
           AND name     LIKE 'asset_data%' 
           ORDER BY 1;
        """
        for table in self.q.q(sql).fetchall():
            table = table[0]  # one-item selects are returned as one-item tuples.
            #
            # Below is a massive query found on the web to get
            # column metadata out of sqlite. I'd like to
            # find something simpler, but don't want to touch this.
            sql = """
               SELECT m.name as tableName,
                  p.name as columnName,
                  p.type as columnType
               FROM sqlite_master m
               left outer join pragma_table_info((m.name)) p
               on m.name <> p.name
             WHERE m.name is '{}'
             AND columnName in {}
             order by tableName, columnName
             """.format(table, self.asset_columns)
            shlog.vverbose(sql)
            #
            # Now get a comma-separated string of the matching columns
            #
            cols = [col for (_, col, _) in self.q.q(sql)]
            if not cols:
                shlog.verbose(
                    "{} has no matching columns for asset table".format(table))
                continue
            cols_text = ", ".join(cols)

            #
            # and populate the assets table with
            # fields that match
            #
            sql = """
              INSERT INTO assets ({}) 
             SELECT {} from {}
              """.format(cols_text, cols_text, table)
            shlog.vverbose(sql)
            self.q.q(sql)
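
For the "something simpler" the comment above wishes for, sqlite3 exposes the same column names through PRAGMA table_info; a sketch under the same assumption that self.q.q returns an iterable cursor:

# table_info rows are (cid, name, type, notnull, dflt_value, pk)
cols = [row[1] for row in self.q.q("PRAGMA table_info({})".format(table))
        if row[1] in self.asset_columns]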
Example No. 16
def main(args):
    """Run reports pulling in existing infrastructure and mapping them to services"""
    from security_scripts.information.lib import vanilla_utils
    from security_scripts.information.lib import xs3
    from security_scripts.information.lib import xtags
    from security_scripts.information.lib import L0A
    from security_scripts.information.lib import L0A_L0B
    from security_scripts.information.lib import tag_counter
    from security_scripts.information.lib import untagged_lister
    from security_scripts.information.lib import L1_L2
    from security_scripts.information.lib import L0B_L1
    from security_scripts.information.lib import L2_L3

    # decode full path to dbfile so imports from other directories don't get confused
    if args.dbfile != ':memory:':
        args.dbfile = os.path.abspath(args.dbfile)

    shlog.verbose(args)
    shlog.verbose("only tests matching %s will be considered", (args.only))
    q = vanilla_utils.Q(args.dbfile, args.flush)

    # dict structure: lvl name: [module, module name, priority]
    xreports_dict = {
        'L0A': [L0A, 'L0A', 1],
        'L0B': [L0A_L0B, 'L0A_L0B', 2],
        'L1': [L0B_L1, 'L0B_L1', 3],
        'L2': [L1_L2, 'L1_L2', 4],
        'L3': [L2_L3, 'L2_L3', 5]
    }

    if args.start not in xreports_dict.keys():
        shlog.normal('illegal start parameter: {}'.format(args.start))
        exit(0)

    if args.end not in xreports_dict.keys():
        shlog.normal('illegal end parameter: {}'.format(args.end))
        exit(0)

    for x in xreports_dict:
        # run every stage whose priority falls between start and end
        module, mod_name, priority = xreports_dict[x]
        if xreports_dict[args.start][2] <= priority <= xreports_dict[args.end][2]:
            module.Acquire(args, mod_name, q)

    # there's more stuff down there, think about implementing it
    exit(0)
    s3_acquire = xs3.Acquire(args, "s3", q)
    tags_acquire = xtags.Acquire(args, "tags", q)
    tag_c_acquire = tag_counter.Acquire(args, "TAG_COUNTER", q)
    untagged = untagged_lister.Acquire(args, "UNTAGGED_LISTER", q)

    # at this point data is in the relational DB
    if args.dump:
        # tag_acquire.print_data()
        # s3_acquire.print_data()
        # secret_acquire.print_data()
        # certificate_acquire.print_data()
        # load_balancer_acquire.print_data()
        # repos_acquire.print_data()
        # instances_acquire.print_data()
        exit()

    # reporting actions are driven by instantiating the classes.
    tag_reports = xtags.Report(args, "Tagging Rule Check", q)
    tag_c_report = tag_counter.Report(args, "Tagging Count Check", q)
    untagged_report = untagged_lister.Report(args, "Untagged Resources", q)
    exit()