def exists(self, instance_id):
    if redis_store.get(
            "credentials-{instance_id}".format(instance_id=instance_id)):
        return True
    else:
        return False
def __init__(self):
    if self.is_cached():
        self.advisor_object = pickle.loads(redis_store.get('advisor'))
        self.results = self.advisor_object.results
    else:
        self.results = self.json()
        self.__serialize()
def is_cached(self):
    if redis_store.get(
            "credentials-{instance_id}".format(instance_id=self.instance_id)):
        return True
    else:
        return False
def search_case_files(self, snapshot_id):
    # Check the case bucket for a completed plaso timeline for this snapshot.
    case_files = controllers.enumerate_bucket(redis_store.get('case_bucket'))
    plaso_file = "{snapshot_id}.plaso".format(snapshot_id=snapshot_id)
    if plaso_file in case_files:
        return True
    else:
        return False
def unpickle_object(key):
    try:
        print "unpickling"
        unpickled_object = pickle.loads(redis_store.get(key))
        return unpickled_object
    except Exception:
        print "exception unpickling"
        return None
def load_credential(self, instance_id):
    try:
        credential_object = pickle.loads(
            redis_store.get(
                "credentials-{instance_id}".format(instance_id=instance_id)))
    except Exception:
        # No cached credential for this instance; __init__ returns None,
        # so callers effectively receive None when nothing is cached.
        credential_object = super(Asset_Credential, self).__init__()
    return credential_object
def __init__(self):
    iam_client = boto3.client('iam')
    iam = boto3.resource('iam')
    if self.is_cached():
        self.snapshot_object = pickle.loads(redis_store.get('snapshots'))
        self.regions = self.snapshot_object.regions
        self.snapshots = self.snapshot_object.snapshots
    else:
        self.regions = Region().get_all()
        self.snapshots = self.get_case_snapshots()
        self.__serialize()
def __init__(self):
    if self.is_cached():
        self.inventory_object = pickle.loads(redis_store.get('inventory'))
        self.regions = self.inventory_object.regions
        self.instances = self.inventory_object.instances
        self.processed_instances = self.inventory_object.processed_instances
    else:
        self.regions = Region().get_all()
        self.instances = Instance().get_all_running()
        self.processed_instances = Instance().get_all_processed()
        self.__serialize()
def __init__(self):
    if self.is_cached():
        self.asset_object = pickle.loads(redis_store.get('asset'))
        self.case_number = self.asset_object.case_number
        self.bucket = self.asset_object.bucket
        self.case_files = self.asset_object.case_files
        self.case_instances = self.asset_object.case_instances
    else:
        self.case_number = app.config.get('CASE_NUMBER')
        self.bucket = self.set_case_bucket()
        self.case_files = self.get_case_files()
        self.case_instances = self.get_case_instances()
        self.__serialize()
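The __serialize() calls in the constructors above refer to a caching helper that is not shown in this section. A minimal sketch, assuming it is simply the pickling counterpart of the pickle.loads() reads (the Redis key name here is illustrative; each class would use its own key such as 'advisor', 'snapshots', 'inventory', or 'asset'):

import pickle

def __serialize(self):
    # Assumed counterpart of the pickle.loads() reads above: cache the fully
    # built object back into Redis so the next __init__ can short-circuit
    # through is_cached(). The 'asset' key mirrors the read in the constructor
    # above; the real helper is not shown in this section.
    redis_store.set('asset', pickle.dumps(self))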
def process_asset(self, instance_id, instance_inventory):
    # Fetch cached SSH credentials for the instance, falling back to
    # placeholder values when nothing has been stored.
    credentials = Asset_Credential().load_credential(instance_id)
    if credentials is not None:
        user = credentials.username
        ssh_key_file = self.write_temporary_key(credentials.sshkey)
    else:
        user = "******"
        ssh_key_file = self.write_temporary_key("empty key")
    compromised_host_ip = instance_inventory[0]['public_ip_address']
    bucket = redis_store.get('case_bucket')
    case_number = app.config.get('CASE_NUMBER')
    # Queue the asynchronous awsir job against the compromised host.
    delay_awsir.delay(case_number=case_number,
                      bucket=bucket,
                      user=user,
                      ssh_key_file=ssh_key_file,
                      compromised_host_ip=compromised_host_ip)
    # Async job to update case files in the background
    delay_refresh_all_redis.delay()
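write_temporary_key() is called above but not shown in this section. A minimal sketch, assuming it only needs to write the key material to a temporary file and return the path (the .pem suffix and the tempfile usage are assumptions):

import tempfile

def write_temporary_key(self, key_material):
    # Hypothetical helper: persist the SSH private key (or the placeholder
    # text) to a named temp file so the async job can be handed a file path
    # instead of raw key material.
    key_file = tempfile.NamedTemporaryFile(suffix='.pem', delete=False)
    key_file.write(key_material)
    key_file.close()
    return key_file.name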
def __init__(self):
    self.s3 = boto3.resource('s3')
    self.case_bucket = redis_store.get('case_bucket')
    self.client = boto3.client('s3', 'us-west-2')
def is_cached(self):
    if redis_store.get('inventory'):
        return True
    else:
        return False
def is_cached(self):
    if redis_store.get('asset') is not None:
        return True
    else:
        return False
def is_cached(self):
    if redis_store.get('advisor'):
        return True
    else:
        return False
def __init__(self):
    self.case_files = controllers.enumerate_bucket(
        redis_store.get('case_bucket'))
    self.memory_files = self.lime_files()
def is_timesketch(self, snapshot_id):
    if redis_store.get(snapshot_id) == "timesketch":
        return True
    else:
        return False
def in_progress(self, snapshot_id):
    if redis_store.get(snapshot_id) == "processing":
        return True
    else:
        return False
def launch_processor(self, snapshot_id, region):
    client = Connect(region=region).client
    ec2_resource = boto3.resource('ec2', region)
    bucket = redis_store.get('case_bucket')
    # User-data template: run the threatresponse/plaso container against each
    # partition of the attached evidence volume, wait for the container to
    # finish, sync the output to the case bucket, then power off (the
    # instance terminates on shutdown).
    script = """#!/bin/bash
mkdir -p /tmp/plaso
for i in $(lsblk -r |awk '{ print $1 }'|grep -v md | grep -v docker | grep -v xvda |grep -v loop |grep .*[[:digit:]]|sort|uniq;); do
echo $i
docker run --privileged --env SNAP=SNAPSHOT_ID -v /dev/$i:/dev/$i -v /tmp:/tmp threatresponse/plaso > /tmp/plaso/SNAPSHOT_ID.log;
done
while [ `docker ps | grep plaso | cut -d ' ' -f1 | wc -l` == '1' ]
do
echo "Plaso running"
done
aws s3 sync /tmp/plaso/ s3://BUCKETNAME_HERE
sleep 60
poweroff
"""
    script = script.replace('BUCKETNAME_HERE', bucket)
    script = script.replace('SNAPSHOT_ID', snapshot_id)
    instance = ec2_resource.create_instances(
        ImageId=ThreatResponseAMI().ami_ids[region],
        MinCount=1,
        MaxCount=1,
        # KeyName='YOUR-DEBUGGING-KEY',
        InstanceType='m3.large',
        IamInstanceProfile={
            'Name': "cloudresponse_workstation-{case_number}-{region}".format(
                case_number=app.config.get('CASE_NUMBER'),
                region=region
            )
        },
        InstanceInitiatedShutdownBehavior='terminate',
        BlockDeviceMappings=[
            {
                'VirtualName': 'ephemeral1',
                'DeviceName': '/dev/sdf',
                'Ebs': {
                    'SnapshotId': snapshot_id,
                    'DeleteOnTermination': True,
                    'VolumeType': 'gp2',
                },
            },
        ],
        UserData=script
    )
    # create_instances returns ec2.Instance resources, so read the id
    # directly instead of parsing the object's repr string.
    instance_id = instance[0].instance_id
    redis_store.set(snapshot_id, 'processing')
    client.create_tags(
        Resources=[instance_id],
        Tags=[
            {
                'Key': 'snapshot-processor',
                'Value': "{snapshot_id}".format(snapshot_id=snapshot_id)
            }
        ]
    )
    delay_refresh_all_redis.delay()
    return instance_id
def set_case_bucket(self):
    if redis_store.get('case_bucket') is not None:
        return redis_store.get('case_bucket')
    else:
        case_bucket = controllers.set_case_bucket()
        return case_bucket
def snapshot_state(self, snapshot_id):
    # If no state has been recorded for this snapshot yet, let has_plaso()
    # set one, then return whatever state Redis now holds.
    if redis_store.get(snapshot_id) is None:
        self.has_plaso(snapshot_id)
    return redis_store.get(snapshot_id)
def is_cached(self):
    if redis_store.get('snapshots'):
        return True
    else:
        return False