def inf_s3_summary(self):
    """
    Just list information about all the buckets.
    """
    self.current_test = "general information on all S3 buckets"
    shlog.normal(self.current_test)
    # Report every column for every bucket row.
    return '''
       SELECT * FROM s3
       '''
def xinf_s3_bucket_asset_format(self):
    """
    Report bucket contents in the asset-catalog format.
    """
    shlog.normal("Reporting on bucket in asset formant")
    query = '''
       SELECT
          'R:Bucket Contents:'||bucket asset,
          'D' type,
          'R:'||region "where"
       FROM s3
       '''
    self.df = self.q.q_to_df(query)
def inf_service_resources(self):
    """
    List AWS resources associated with services.
    """
    shlog.normal("Reporting resources associated with a service")
    # One row per (service value, resource arn) pairing.
    return '''
       SELECT value, short_arn, arn
       FROM tags
       WHERE tag = "Service"
       ORDER by value, arn
       '''
def inf_service_names(self):
    """
    List unique service names found in tags in the running system.
    """
    shlog.normal("Reporting on unique service names")
    return '''
       SELECT distinct value
       FROM tags
       WHERE tag = "Service"
       '''
def inf_tags_bound_to_secrets(self):
    """
    Show tags bound to secrets.

    Tagging can be used to control access to secrets.
    """
    # BUGFIX: log message previously read "bund" instead of "bound".
    shlog.normal("tags that are bound to secrets")
    sql = '''
       SELECT short_arn, tag, value, arn
       FROM tags
       WHERE arn like "%secret%"
       ORDER BY arn
       '''
    return sql
def check(args):
    """
    actual check
    :return:
    """
    me = w(args)
    boto3.setup_default_session(profile_name=args.profile)
    iam = boto3.client('iam')
    # Ask AWS whether this principal may attach/detach role policies.
    simulation = iam.simulate_principal_policy(
        PolicySourceArn=me,
        ActionNames=["iam:DetachRolePolicy", "iam:AttachRolePolicy"])
    for result in simulation['EvaluationResults']:
        action = result['EvalActionName']
        if result['EvalDecision'] == 'allowed':
            shlog.normal(action + ' permission check ok!')
        else:
            shlog.normal('AWS reported ' + me + ' is not allowed to perform ' + action)
def tst_has_standard_criticality(self):
    """
    Test for ARN's that have criticality, but not one of the standard values.
    """
    shlog.normal("looking for non standard criticality values")
    query = '''
       SELECT short_arn, tag, value
       FROM tags
       WHERE tag = "Criticality"
       AND value not in ("Development", "Demonstration", "Production", "Investigation")
       '''
    return query
def xinf_s3_configuraton_asset_format(self):
    """
    Report bucket configuration in the asset-catalog format.
    """
    shlog.normal("Reporting on bucket configuraiton in asset formant")
    query = '''
       SELECT
          'R:Bucket configuration:'||bucket asset,
          'R:Num policies grants:'||npolicies||','||ngrants description,
          'R:Restrict Access to information' 'Business Value',
          'D' type,
          'R:'||region 'where'
       FROM s3
       '''
    self.df = self.q.q_to_df(query)
def make_asset_secrets_format(self):
    """
    Make table(s) for insertion into the master asset table.
    """
    shlog.normal("building table secret assets ")
    # Derive asset rows directly from the secrets table.
    query = '''
       CREATE TABLE asset_data_secrets AS
       SELECT
          "R:Secret held in "||short_arn asset,
          description,
          "D:Data providing access as described" business_value,
          "D:Unauthorized access to SCiMMA AWS accoun holder" impact_c,
          "C" type,
          short_arn "where"
       FROM secrets
       '''
    self.q.q(query)
def tst_has_standard_tags(self):
    """
    Test for ARN's missing either Criticality or Service tags.
    """
    shlog.normal("performing test for Criticality and Service Tags")
    # The INTERSECT subquery yields ARNs holding BOTH tags; everything
    # else is reported as missing at least one of them.
    return '''
       SELECT short_arn, tag, value, arn
       FROM tags
       WHERE arn NOT IN (
          select ARN from tags where tag = "Criticality"
          INTERSECT
          select ARN from tags where tag = "Service"
       )
       '''
def make_data(self):
    """
    Make a table called CERTIFICATES based on certificates held in AWS.

    No-op if the table already exists (data already collected).
    """
    if self.does_table_exist():
        shlog.normal("{} data already collected".format(self.table_name))
        return
    shlog.normal("beginning to make {} data".format(self.table_name))
    # One row per certificate; the full describe_certificate record is
    # kept as JSON alongside the flattened columns.
    sql = """CREATE TABLE certificates (asset text, domain text, arn text, short_arn text, inuseby text, hash text, record json)"""
    shlog.verbose(sql)
    self.q.q(sql)
    # The boto3 API can return data in pages; walk every region.
    for page, _ in self._pages_all_regions('acm', 'list_certificates'):
        for certificate in page['CertificateSummaryList']:
            arn = certificate["CertificateArn"]
            domain = certificate["DomainName"]
            short_arn = aws_utils.shortened_arn(arn)
            try:
                for detail, _ in self._pages_all_regions('acm', 'describe_certificate', parameter={'CertificateArn': arn}):
                    # 'Nothing' marks an empty/sentinel page -- skip it.
                    if 'Nothing' not in detail.keys():
                        record = detail["Certificate"]
                        inuseby = ",".join(
                            aws_utils.shortened_arn(a) for a in record["InUseBy"])
                        asset = "Cert:{} for use by {}".format(domain, inuseby)
                        hash = vanilla_utils.tiny_hash(arn)
                        record = self._json_clean_dumps(record)
                        sql = "INSERT INTO certificates VALUES (?, ?, ?, ?, ?, ?, ?)"
                        params = (asset, domain, arn, short_arn, inuseby, hash, record)
                        self.q.executemany(sql, [params])
                        # populate the all_json table
                        self._insert_all_json("certificate", short_arn, record)
            except Exception as e:
                # BUGFIX: previously every unexpected exception was silently
                # swallowed; only certificate-not-found is benign.
                if "ResourceNotFoundException" in str(e):
                    print("certificate not found!")
                else:
                    raise
def priv(args):
    """
    Make a request to elevate the target role to ProposedPoweruser.

    :return: None
    """
    boto3.setup_default_session(profile_name=args.profile)
    role = boto3.resource('iam').Role(args.role)
    detacher(args, role)  # attach read-only
    shlog.normal(
        'Attaching ProposedPoweruser and RoleManagementWithCondition to ' + args.role)
    policy_base = 'arn:aws:iam::' + args.accountid + ':policy/'
    # Attach both elevation policies in order.
    for policy_name in ('ProposedPoweruser', 'RoleManagementWithCondition'):
        response = role.attach_policy(PolicyArn=policy_base + policy_name)
        shlog.debug(response)
def clean_data(self):
    """
    Remove items that are not relevant.

    Tags with values beginning with uiuc- are not set by the scimma
    project; they are set by our UIUC AWS provider.
    """
    shlog.normal("beginning to clean %s data" % self.name)
    # Drops EVERY tag row for any ARN that carries at least one uiuc-*
    # value, not just the uiuc-* rows themselves.
    self.q.q('''
       DELETE FROM tags
       WHERE arn IN (
          SELECT DISTINCT arn FROM tags WHERE value LIKE "uiuc-%"
       )
       ''')
    shlog.normal("%s data prepared" % self.name)
def make_data(self):
    """
    Make a table called ec2 based on describe_instances data.

    If the ec2 table exists, the data was already collected; collecting
    again would produce duplicate rows, so return immediately.
    """
    if self.does_table_exist():
        shlog.normal("ec2 data already collected")
        return
    shlog.normal("beginning to make {} data".format(self.name))
    # One row per instance; the full API record is kept as JSON.
    sql = """CREATE TABLE ec2 (
        instance TEXT, vpc TEXT, subnet TEXT, publicdnsname TEXT,
        privatednsame TEXT, privateipaddress TEXT, keyname TEXT, record JSON)"""
    shlog.verbose(sql)
    self.q.q(sql)
    # The boto3 API can return data in pages; walk every region.
    for page, _ in self._pages_all_regions('ec2', 'describe_instances'):
        for reservation in page["Reservations"]:
            for i in reservation["Instances"]:
                record = self._json_clean_dumps(i)
                # IDIOM FIX: statements un-chained from semicolons and the
                # builtin name `list` no longer shadowed.
                instance = i.get("InstanceId", "")
                vpc = i.get("VpcId", "")
                subnet = i.get("SubnetId", "")
                publicdnsname = i.get("PublicDnsName", "")
                privatednsame = i.get('PrivateDnsName', "")
                privateipaddress = i.get("PrivateIpAddress", "")
                keyname = i.get("KeyName", "")
                sql = '''INSERT INTO ec2 VALUES (?, ?, ?, ?, ?, ?, ?,?)'''
                row = (instance, vpc, subnet, publicdnsname, privatednsame,
                       privateipaddress, keyname, record)
                self.q.executemany(sql, [row])
                # populate the all_json table
                self._insert_all_json("ec2", instance, record)
def _did_purge_cache(self):
    """
    Determine if the database file is too old to use.

    Returns True when the db file was purged (or this is a memory-only
    instance); False when the existing cache file will be reused.
    """
    from security_scripts.information.lib import shlog
    import os
    import stat
    import time

    max_age_seconds = 60 * 60 * 4
    pathname = self.dbfile
    # A memory-only DB, or a missing file, behaves "as if" the cache
    # had already been purged.
    if pathname == ":memory:" or not os.path.isfile(pathname):
        return True
    age = time.time() - os.stat(pathname)[stat.ST_MTIME]
    if age > max_age_seconds or self.flush:
        shlog.normal("removing stale database cache {}".format(pathname))
        os.remove(pathname)
        return True
    shlog.normal("using database cache {}".format(pathname))
    return False
def ec2stop(args):
    """
    EC2 mass shutdown simulation.

    :return: None
    """
    shlog.normal('Simulating ec2stop')
    me = w(args)
    boto3.setup_default_session(profile_name=args.profile)
    # First confirm the principal may stop instances at all.
    simulation = boto3.client('iam').simulate_principal_policy(
        PolicySourceArn=me, ActionNames=["ec2:StopInstances"])
    for result in simulation['EvaluationResults']:
        if result['EvalDecision'] == 'allowed':
            shlog.normal('ec2:StopInstances permission check ok!')
        else:
            shlog.normal('AWS reported ' + me + ' is not allowed to perform ec2:StopInstances!')
    # Then run the real shutdown routine in dry-run mode.
    from security_scripts.controls.buttons import ec2stop as e
    e(args, True)
def ec2stop(args, dryrun=False):
    """
    Make a request to stop all ec2 instances.

    :return:
    """
    from botocore.exceptions import ClientError
    from security_scripts.information.lib import aws_utils as au  # only works in plugin and IDE
    args.session = boto3.Session(profile_name=args.profile)
    regions = au.decribe_regions_df(args)  # use for deployment
    # regions = {'RegionName': ['us-east-2']}  # test mode
    for region in regions['RegionName']:
        shlog.normal('Stopping region ' + region)
        # init connection to region and get instances there
        client = boto3.client('ec2', region_name=region)
        for reservation in client.describe_instances()['Reservations']:
            # NOTE(review): only the first instance of each reservation is
            # processed here -- confirm that is intended.
            instance = reservation['Instances'][0]['InstanceId']
            # allow API termination first...
            shlog.verbose('Allowing API termination for instance ' + instance + ' in region ' + region)
            shlog.debug(client.modify_instance_attribute(
                InstanceId=instance, DisableApiTermination={'Value': False}))
            # ...then perform the halt
            shlog.normal('Stopping instance ' + instance + ' in region ' + region)
            try:
                client.stop_instances(InstanceIds=[instance], DryRun=dryrun, Force=True)
            except ClientError as ce:
                if dryrun:
                    # a client error is expected when simulating
                    shlog.normal('Stop simulation succeeded for instance ' + instance)
                    shlog.verbose('Success code: ' + str(ce))
                else:
                    # we might actually want to catch real exceptions
                    raise ce
def main(args):
    """Run reports pulling in existing infrastructure and mapping them to services."""
    from security_scripts.information.lib import vanilla_utils
    from security_scripts.information.lib import xs3
    from security_scripts.information.lib import xtags
    from security_scripts.information.lib import L0A
    from security_scripts.information.lib import L0A_L0B
    from security_scripts.information.lib import tag_counter
    from security_scripts.information.lib import untagged_lister
    from security_scripts.information.lib import L1_L2
    from security_scripts.information.lib import L0B_L1
    from security_scripts.information.lib import L2_L3

    # decode full path to dbfile so imports from other directories don't get confused
    if args.dbfile != ':memory:':
        args.dbfile = os.path.abspath(args.dbfile)
    shlog.verbose(args)
    shlog.verbose("only tests matching %s will be considered", (args.only))
    q = vanilla_utils.Q(args.dbfile, args.flush)
    # level name -> (module implementing Acquire, pipeline priority)
    # IDIOM FIX: mapping to module objects replaces the previous exec() of
    # '<module>.Acquire(args, "<module>", q)' strings.
    xreports_dict = {
        'L0A': (L0A, 1),
        'L0B': (L0A_L0B, 2),
        'L1': (L0B_L1, 3),
        'L2': (L1_L2, 4),
        'L3': (L2_L3, 5)
    }
    if args.start not in xreports_dict:
        shlog.normal('illegal start parameter: {}'.format(args.start))
        exit(0)
    if args.end not in xreports_dict:
        # BUGFIX: this message previously reported args.start.
        shlog.normal('illegal end parameter: {}'.format(args.end))
        exit(0)
    start_priority = xreports_dict[args.start][1]
    end_priority = xreports_dict[args.end][1]
    for module, priority in xreports_dict.values():
        # run every stage whose priority falls within [start, end]
        if start_priority <= priority <= end_priority:
            module.Acquire(args, module.__name__.split('.')[-1], q)
    # there's more stuff down there, think about implementing it
    exit(0)

    # NOTE: unreachable legacy path kept pending implementation.
    s3_acquire = xs3.Acquire(args, "s3", q)
    tags_acquire = xtags.Acquire(args, "tags", q)
    tag_c_acquire = tag_counter.Acquire(args, "TAG_COUNTER", q)
    untagged = untagged_lister.Acquire(args, "UNTAGGED_LISTER", q)
    # at this point data is in the relation DB
    if args.dump:
        # tag_acquire.print_data()
        # s3_acquire.print_data()
        # secret_acquire.print_data()
        # certificate_acquire.print_data()
        # load_balancer_acquire.print_data()
        # repos_acquire.print_data()
        # instances_acquire.print_data()
        exit()
    # reporting actions are driven by instantiating the classes.
    tag_reports = xtags.Report(args, "Tagging Rule Check", q)
    tag_c_report = tag_counter.Report(args, "Tagging Count Check", q)
    untagged_report = untagged_lister.Report(args, "Untagged Resources", q)
    exit()
def make_data(self):
    """
    Build the master assets table from all asset_data_* tables.

    Creates self.table_name with one TEXT column per name in
    self.asset_columns, then copies into it whichever of those columns
    each contributing asset_data_* table actually defines.
    """
    if self.does_table_exist():
        shlog.normal("Assets already collected")
        return
    shlog.normal("beginning to make {} data".format(self.name))
    # Master table other reports deposit asset data into;
    # every declared asset column is typed TEXT.
    sql = "CREATE TABLE {} ({} TEXT)".format(
        self.table_name, " TEXT, ".join(self.asset_columns))
    shlog.vverbose(sql)
    self.q.q(sql)
    # Find every contributing table/view named asset_data*.
    sql = """
          SELECT name FROM sqlite_master
          WHERE type IN ('table','view')
          AND name NOT LIKE 'sqlite_%'
          AND name LIKE 'asset_data%'
          ORDER BY 1;
          """
    for table in self.q.q(sql).fetchall():
        table = table[0]  # one-item selects are returned in a list/tuple
        #
        # Column-metadata query (joins sqlite_master against
        # pragma_table_info) to discover which of this table's columns
        # match the asset schema. NOTE(review): '{}' is filled with the
        # Python repr of self.asset_columns, so this only forms valid SQL
        # when asset_columns is a tuple -- confirm upstream.
        sql = """
              SELECT m.name as tableName, p.name as columnName, p.type as columnType
              FROM sqlite_master m
              left outer join pragma_table_info((m.name)) p on m.name <> p.name
              WHERE m.name is '{}' AND columnName in {}
              order by tableName, columnName
              """.format(table, self.asset_columns)
        shlog.vverbose(sql)
        #
        # Collect just the column names (middle field of each row).
        #
        cols = [col for (_, col, _) in self.q.q(sql)]
        if not cols:
            shlog.verbose("{} has no matching columns for asset table".format(table))
            continue
        cols_text = ", ".join(cols)
        #
        # Populate the assets table with the fields that match.
        #
        sql = """
              INSERT INTO assets ({})
              SELECT {} from {}
              """.format(cols_text, cols_text, table)
        shlog.vverbose(sql)
        self.q.q(sql)
def make_data(self):
    """
    Make a table called s3 based on bucket data.

    Bucket listing comes from a local JSON file (self.s_path); per-bucket
    details (region, ACL grants, policy/policy status) are still fetched
    live from boto3. If the s3 table exists, the data was already
    collected; collecting again would produce duplicate rows.
    """
    if self.does_table_exist():
        # BUGFIX: message previously said "tags" (copy-paste from the
        # tags module) although this routine builds the s3 table.
        shlog.normal("s3 data already collected")
        return
    shlog.normal("beginning to make {} data".format(self.name))
    # One bucket per record; raw API responses kept as JSON.
    sql = ("create table s3 (asset text, bucket text, arn text, region text, "
           "npolicies text, ngrants text, grants json, policy_status text, "
           "bucket_policy JSON, record JSON)")
    shlog.verbose(sql)
    self.q.q(sql)
    # get buckets from local storage
    response = self.json_from_file(self.s_path)[0]
    # still need a live session for the per-bucket detail calls
    session = boto3.Session(profile_name=self.args.profile)
    client = session.client('s3')
    # NOTE(review): should the per-bucket detail below also be cached
    # locally / pulled with L0A instead of live boto3 requests?
    # (Ignoring 'ResponseMetadata' -- no pagination context in there.)
    for bucket in response["Buckets"]:
        name = bucket["Name"]
        # arn can be computed from aws partition (e.g aws, aws-us-gov) and bucket name
        arn = "arn:{}:s3:::{}".format("aws", name)
        region = client.head_bucket(
            Bucket=name)['ResponseMetadata']['HTTPHeaders']['x-amz-bucket-region']
        grants = client.get_bucket_acl(Bucket=name)["Grants"]
        ngrants = len(grants)
        asset = "aws bucket:{}".format(name)
        try:
            result = client.get_bucket_policy_status(Bucket=name)
            policy_status = result["PolicyStatus"]
            bucket_policy = client.get_bucket_policy(Bucket=name)["Policy"]
            npolicies = len(bucket_policy)
        except botocore.exceptions.ClientError:
            # No policy attached; treat the bucket as non-public.
            policy_status = {"IsPublic": False}
            bucket_policy = []
            npolicies = 0
        record = bucket
        # BUGFIX: these two assignments were swapped, filing the policy
        # text under 'BucketPolicy'... no -- filing the STATUS under
        # 'BucketPolicy' and the policy under 'PolicyStatus'.
        record['PolicyStatus'] = policy_status
        record['BucketPolicy'] = bucket_policy
        record['Region'] = region
        record = self._json_clean_dumps(record)
        sql = '''INSERT INTO s3 VALUES (?,?,?,?,?,?,?,?,?,?)'''
        # IDIOM FIX: tuple renamed from `list` (shadowed the builtin).
        row = (asset, name, arn, region, "{}".format(npolicies),
               "{}".format(ngrants), json.dumps(grants),
               json.dumps(policy_status), json.dumps(bucket_policy), record)
        self.q.executemany(sql, [row])
        # populate the all_json table
        self._insert_all_json("s3", name, record)
def make_data(self):
    """
    Make a table called repos from the scimma GitHub organization.

    Uses curl with ~/.netrc credentials to list the org's repositories
    and each repo's collaborators, then records name, description, URL,
    owner, membership lists, a tiny hash and the raw JSON record.
    Exits if credentials are missing or the listing request fails.
    """
    if self.does_table_exist():
        shlog.normal("repos already collected")
        return
    if self.netrc_has_credentials():
        shlog.normal('using api.github.com login+token from ~/.netrc')
    else:
        shlog.normal('api.github.com credentials not found in ~/.netrc')
        exit(0)
    shlog.normal("beginning to make {} data".format(self.name))
    sql = "CREATE TABLE repos (name TEXT, description TEXT, url TEXT, who TEXT, " \
          "can_pull text, can_push text, admins text, hash TEXT, record JSON)"
    shlog.verbose(sql)
    self.q.q(sql)
    # n.b. beta interface when this was coded
    cmd = 'curl -n -H "Accept: application/vnd.github.inertia-preview+json" ' \
          ' https://api.github.com/orgs/scimma/repos'
    import subprocess
    # check=True throws if curl itself exits non-zero; a short body means
    # we got valid output that is really something like a "404" page.
    shlog.verbose(cmd)
    result = subprocess.run(cmd, text=True, capture_output=True, shell=True, check=True)
    if len(result.stdout) < 200:
        shlog.verbose("github curl error: stdout:{} stderr:{}".format(
            result.stdout, result.stderr))
        exit(1)
    repo_list = json.loads(result.stdout)
    for repo in repo_list:
        name = repo['full_name']
        description = "{}(Private={})".format(repo['description'], repo['private'])
        where = repo['url']  # url
        who = repo['owner']['login']  # is really the account
        hash = vanilla_utils.tiny_hash(name)
        # BUGFIX: was json.dumps(result), which stored the whole repo list
        # on the first pass and the previous repo's collaborator response
        # on later passes; store this repo's own record.
        record = json.dumps(repo)
        # get user list
        collaborators_url = repo['collaborators_url'].split('{')[0]
        try:
            # BUGFIX: the checked subprocess.run was previously OUTSIDE the
            # try, so the CalledProcessError handler was unreachable.
            collab = subprocess.run('curl -n ' + collaborators_url, text=True,
                                    capture_output=True, shell=True, check=True)
            collaborators = json.loads(collab.stdout)
            # attempt to sort the users
            members = {
                'can_pull': self.get_members(collaborators, 'pull'),
                'can_push': self.get_members(collaborators, 'push'),
                'admins': self.get_members(collaborators, 'admin')
            }
        except subprocess.CalledProcessError:
            # BUGFIX: the old handler left `members` unbound, which raised
            # NameError at the INSERT; record empty membership instead.
            shlog.normal('could not read collaborators for ' + name)
            members = {'can_pull': '', 'can_push': '', 'admins': ''}
        sql = "INSERT INTO repos VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)"
        self.q.executemany(
            sql, [(name, description, where, who, members['can_pull'],
                   members['can_push'], members['admins'], hash, record)])