def test_create_key_pair(self, init_mock):
    """Exercises the code path for creating a key pair.

    Instance.__init__ is patched out (init_mock) so a bare Instance can be
    built without touching AWS; the EC2 client is replaced with a Mock.
    """
    init_mock.return_value = None
    instance = Instance()
    instance.key_pair_name = self.instance.key_pair_name
    instance.ec2_client = Mock()
    instance.ec2_client.create_key_pair.return_value = {
        "KeyMaterial": "dummy key"
    }
    key_path = instance._create_key_pair()
    # assertEquals is a deprecated alias; use assertEqual.
    self.assertEqual(key_path, f"./{self.instance.key_pair_name}.pem")
    # Close the handle before removing the file (required on Windows,
    # and avoids a ResourceWarning everywhere else).
    with open(key_path, "r") as key:
        self.assertEqual(key.read(), "dummy key")
    os.remove(f"./{self.instance.key_pair_name}.pem")
def setUp(self, create_key_pair, boto3_mock):
    """Build a fully mocked Instance for each test case."""
    create_key_pair.return_value = "fakekey.pem"
    init_kwargs = dict(
        AWS_access_key_id="1234",
        AWS_secret_access_key="1234",
        region_name="abc_region",
        base_image_id="ami-123",
        os="amazonLinux",
        security_group_id="sg-1234",
        AMI_name="OpenDistroAMI",
        RPM_package_version="1.0.0",
        APT_OSS_version="1.0.0",
    )
    self.instance = Instance(**init_kwargs)
    # Grab the mocked boto3 instance handed back by create_instances();
    # this is the only practical way to get at the underlying mock.
    self.boto_instance = boto3_mock.resource().create_instances()[0]
    self.instance.key_pair_name = "ODFEAMIInstanceKey"
def pick_instance(args):
    """Pick a single ELB instance, prompting the user when several exist.

    Re-prompts until the user enters a valid numeric index.
    """
    instances = Instance.elb_instances(target_group_arn_for(args))
    if len(instances) == 1:
        return instances[0]
    while True:
        print_instances(instances, number=True)
        inst = input('Which instance? ')
        try:
            return instances[int(inst)]
        except (ValueError, IndexError):
            # Bare `except:` also swallowed KeyboardInterrupt/SystemExit;
            # only retry on a bad number, matching the cfg-based variant.
            pass
def pick_instance(cfg: Config):
    """Interactively choose one instance behind the target group for *cfg*.

    Short-circuits when only a single instance exists; otherwise loops
    until the user supplies a valid index.
    """
    elb_instances = Instance.elb_instances(target_group_arn_for(cfg))
    if len(elb_instances) == 1:
        return elb_instances[0]
    while True:
        print_instances(elb_instances, number=True)
        choice = input('Which instance? ')
        try:
            index = int(choice)
            return elb_instances[index]
        except (ValueError, IndexError):
            pass
def wait_for_autoscale_state(instance: Instance, state: str) -> None:
    """Block until *instance* reaches the autoscale lifecycle *state*.

    Polls describe_autoscale() every 5 seconds. Returns early (with an
    error log) if the instance is no longer part of an ASG.
    """
    logger.info("Waiting for %s to reach autoscale lifecycle '%s'...", instance, state)
    while True:
        autoscale = instance.describe_autoscale()
        if not autoscale:
            # Fixed typo in message: "is not longer" -> "is no longer".
            logger.error("Instance is no longer in an ASG: stopping")
            return
        cur_state = autoscale['LifecycleState']
        logger.debug("State is %s", cur_state)
        if cur_state == state:
            logger.info("...done")
            return
        time.sleep(5)
def test_incorrect_credentials(self):
    """Instance creation with bad credentials must raise ClientError."""
    bad_kwargs = dict(
        AWS_access_key_id="wrong",
        AWS_secret_access_key="wrong",
        region_name="us-east-2",
        base_image_id="ami-123",
        os="amazonLinux",
        security_group_id="sg-1234",
        AMI_name="OpenDistroAMI",
        RPM_package_version="1.0.0",
        APT_OSS_version="1.0.0",
    )
    with self.assertRaises(ClientError):
        Instance(**bad_kwargs)
def test_incorrect_region(self):
    """Instance creation against an unknown region must raise ValueError."""
    bad_kwargs = dict(
        region_name="wrong_region",
        AWS_access_key_id="1234",
        AWS_secret_access_key="1234",
        base_image_id="ami-123",
        os="amazonLinux",
        security_group_id="sg-1234",
        AMI_name="OpenDistroAMI",
        RPM_package_version="1.0.0",
        APT_OSS_version="1.0.0",
    )
    with self.assertRaises(ValueError):
        Instance(**bad_kwargs)
def pick_instances(args):
    """Return every ELB instance behind the target group for *args*."""
    # TODO: maybe add an args option to select only a subset.
    arn = target_group_arn_for(args)
    return Instance.elb_instances(arn)
def instances_status_cmd(args):
    """Print the status of every instance behind the target group."""
    arn = target_group_arn_for(args)
    print_instances(Instance.elb_instances(arn), number=False)
def pick_instances(cfg: Config):
    """Return every ELB instance behind the target group for *cfg*."""
    arn = target_group_arn_for(cfg)
    return Instance.elb_instances(arn)
def instances_status(cfg: Config):
    """Get the status of the instances."""
    arn = target_group_arn_for(cfg)
    instances = Instance.elb_instances(arn)
    print_instances(instances, number=False)
def AMI_builder(
        AWS_access_key_id,
        AWS_secret_access_key,
        region_name,
        base_image_id,
        os,
        security_group_id,
        AMI_name,
        RPM_package_version,
        APT_OSS_version,
):
    """
    Builds the ODFE AMI and copies it to every other region.

    args:
        AWS_access_key_id: str, aws key id
        AWS_secret_access_key: str, aws secret access key
        region_name: str, region where the Instance will be created
        base_image_id: str, base os AMI id
        os: str, ubuntu or amazonLinux
            NOTE: this parameter shadows the stdlib `os` module inside this
            function; kept as-is because callers pass it by keyword.
        security_group_id: str, security group with port 22 open
        AMI_name: str, Name of the AMI that will be created
        RPM_package_version: str, ODFE version installed via RPM (amazon linux)
        APT_OSS_version: str, Elasticsearch OSS version installed via apt (ubuntu)
    returns:
        none; exits the process with -1 on any failure.
    """
    try:
        instance = Instance(
            AWS_access_key_id=AWS_access_key_id,
            AWS_secret_access_key=AWS_secret_access_key,
            region_name=region_name,
            base_image_id=base_image_id,
            os=os,  # ubuntu, amazonLinux
            security_group_id=security_group_id,
            AMI_name=AMI_name,
            RPM_package_version=RPM_package_version,
            APT_OSS_version=APT_OSS_version,
        )
    except Exception as err:
        # Lazy %-style args instead of string concatenation (same output).
        logging.error("Could not bring up the instance. %s", err)
        sys.exit(-1)

    AMI_id = ""
    installation_failed = False
    try:
        instance.wait_until_ready()
    except Exception as err:
        logging.error("Could not bring the instance to ready state. %s", err)
        installation_failed = True
    else:
        try:
            instance.install_ODFE()
            AMI_id = instance.create_AMI()
        except Exception as err:
            installation_failed = True
            logging.error(
                "AMI creation failed there was an error see the logs. %s", err)
    finally:
        # Always attempt cleanup, whether or not install/AMI creation worked.
        try:
            instance.cleanup_instance()
        except Exception as err:
            logging.error(
                "Could not cleanup the instance. There could be an instance "
                "currently running, terminate it. %s", err)
            installation_failed = True

    if installation_failed:
        sys.exit(-1)

    # copy the AMI to the required regions
    ec2_client = boto3.client(
        "ec2",
        aws_access_key_id=AWS_access_key_id,
        aws_secret_access_key=AWS_secret_access_key,
        region_name=region_name,
    )
    AMI_copy_regions = [
        region["RegionName"]
        for region in ec2_client.describe_regions()["Regions"]
    ]
    AMI_copy_regions.remove(region_name)  # since AMI is created here
    copy_AMI_to_regions(
        AWS_access_key_id=AWS_access_key_id,
        AWS_secret_access_key=AWS_secret_access_key,
        AMI_id=AMI_id,
        AMI_name=AMI_name,
        AMI_source_region=region_name,
        AMI_copy_regions=AMI_copy_regions,
    )
class TestInstance(unittest.TestCase):
    """Unit tests for lib.instance.Instance with boto3 fully mocked out."""

    @patch("lib.instance.boto3")
    @patch("lib.instance.Instance._create_key_pair")
    def setUp(self, create_key_pair, boto3_mock):
        """ Initialise mock instance for the test cases """
        create_key_pair.return_value = "fakekey.pem"
        self.instance = Instance(
            AWS_access_key_id="1234",
            AWS_secret_access_key="1234",
            region_name="abc_region",
            base_image_id="ami-123",
            os="amazonLinux",
            security_group_id="sg-1234",
            AMI_name="OpenDistroAMI",
            RPM_package_version="1.0.0",
            APT_OSS_version="1.0.0",
        )
        # Grab the mocked boto3 instance handed back by create_instances();
        # this is the only practical way to get at the underlying mock.
        self.boto_instance = boto3_mock.resource().create_instances()[0]
        self.instance.key_pair_name = "ODFEAMIInstanceKey"

    def test_initialization(self):
        """ Fail with descriptive error if required initialization have failed """
        self.assertEqual("amazonLinux", self.instance.os)
        self.assertEqual("OpenDistroAMI", self.instance.AMI_name)
        self.assertEqual("1.0.0", self.instance.RPM_package_version)
        self.assertEqual("1.0.0", self.instance.APT_OSS_version)

    def test_incorrect_region(self):
        """ Fail with descriptive error if Instance can be created for wrong region """
        with self.assertRaises(ValueError):
            Instance(
                region_name="wrong_region",
                AWS_access_key_id="1234",
                AWS_secret_access_key="1234",
                base_image_id="ami-123",
                os="amazonLinux",
                security_group_id="sg-1234",
                AMI_name="OpenDistroAMI",
                RPM_package_version="1.0.0",
                APT_OSS_version="1.0.0",
            )

    def test_incorrect_credentials(self):
        """ Fail with descriptive error if Instance can be created with wrong credentials """
        with self.assertRaises(ClientError):
            Instance(
                AWS_access_key_id="wrong",
                AWS_secret_access_key="wrong",
                region_name="us-east-2",
                base_image_id="ami-123",
                os="amazonLinux",
                security_group_id="sg-1234",
                AMI_name="OpenDistroAMI",
                RPM_package_version="1.0.0",
                APT_OSS_version="1.0.0",
            )

    def test_wait_until_ready(self):
        """ Exercises the code path for waiting until instance is ready to be used """
        # First call succeeds, second raises WaiterError.
        self.boto_instance.wait_until_running.side_effect = [
            "",
            WaiterError("blah", "blah", "blah"),
        ]
        self.boto_instance.state = -1
        self.instance.wait_until_ready()
        self.boto_instance.wait_until_running.assert_called()
        self.assertRaises(WaiterError, self.instance.wait_until_ready)

    @patch("lib.instance.Instance.__init__")
    def test_create_key_pair(self, init_mock):
        """ Exercises the code path for creating key pair """
        init_mock.return_value = None
        instance = Instance()
        instance.key_pair_name = self.instance.key_pair_name
        instance.ec2_client = Mock()
        instance.ec2_client.create_key_pair.return_value = {
            "KeyMaterial": "dummy key"
        }
        key_path = instance._create_key_pair()
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(key_path, f"./{self.instance.key_pair_name}.pem")
        # Close the handle before removing the file (needed on Windows).
        with open(key_path, "r") as key:
            self.assertEqual(key.read(), "dummy key")
        os.remove(f"./{self.instance.key_pair_name}.pem")

    def test_create_AMI(self):
        """ Exercises the code path for creating AMI """
        self.boto_instance.create_image.return_value = Mock(image_id="123")
        self.instance.create_AMI()
        self.assertEqual(self.instance.snapshot.image_id, "123")
        # Make the AMI waiter fail so the error path is exercised too.
        self.instance.ec2_client.get_waiter().wait.side_effect = WaiterError(
            "bla", "blah", "blah")
        self.assertRaises(WaiterError, self.instance.create_AMI)

    @patch("lib.instance.ODFEInstaller")
    def test_install_ODFE(self, mock_installer):
        """ Exercises the code path for open distro installation in the instance """
        self.instance.install_ODFE()
        mock_installer().install.assert_called()
        mock_installer().install.side_effect = Exception
        self.assertRaises(Exception, self.instance.install_ODFE)

    def test_cleanup_instance(self):
        """ Exercises the code path for cleaning up the instance after use """
        # succeed, then raise, then succeed (third call hits a missing key file).
        self.boto_instance.wait_until_terminated.side_effect = [
            "",
            WaiterError("blah", "blah", "blah"),
            "",
        ]
        key_path = f"./{self.instance.key_pair_name}.pem"
        # Create (and close) the key file so cleanup has something to remove;
        # the original leaked the handle via `temp = open(...)`.
        open(key_path, "w").close()
        self.instance.key_path = key_path
        self.instance.cleanup_instance()
        self.boto_instance.wait_until_terminated.assert_called()
        open(key_path, "w").close()
        self.assertRaises(WaiterError, self.instance.cleanup_instance)
        os.remove(key_path)
        self.assertRaises(FileNotFoundError, self.instance.cleanup_instance)
def handler(event, context):
    """Lambda entry point: create and rotate EBS snapshots/volumes/AMIs.

    Behavior is selected via args.type ("all", "clean*", "create-snapshot*");
    passing --volume or --instance forces create-snapshot mode.
    Returns True on completion.
    """
    args = Args().args
    ec2_client = conn().boto3('ec2', args.region)
    cloudwatch_client = conn().boto3('cloudwatch', args.region)

    if args.volume and args.instance:
        logger.error("Only one of --volume / --instance is allowed")
        exit(1)
    if args.volume or args.instance:
        logger.info(
            "Defaulting to type 'create-snapshot' with inclusion of arg: %s %s"
            % (args.instance, args.volume))
        args.type = "create-snapshot"

    retention_day = timedelta(days=args.retention)
    start_date = Global.today - retention_day

    logger.info("*** Timing ***")
    logger.info("\tCurrent time: %i" % (Global.current_time))
    logger.info("\tRetention: %i" % (args.retention))
    logger.info("\tFull day in seconds: %i" % (Global.full_day))
    logger.info("\tToday: %s" % (str(Global.today)))
    logger.info("\tTomorrow: %s" % (str(Global.tomorrow)))
    logger.info("\tYesterday: %s" % (str(Global.yesterday)))
    logger.info("\t2 Weeks Ago: %s" % (str(Global.two_weeks)))
    logger.info("\t4 Weeks Ago: %s" % (str(Global.four_weeks)))
    logger.info("\t30 Days Ago: %s" % (str(Global.thirty_days)))
    logger.info("\tRetention Time: %s" % (str(retention_day)))
    logger.info("\tStart Date: %s" % (str(start_date)))
    logger.info("\tShort Date: %s" % (Global.short_date))
    logger.info("\tShort Hour: %s" % (Global.short_hour))
    logger.info("")
    logger.info("*** Defined Args ***")
    logger.info("\targs.verbosity: %s" % (args.verbosity))
    logger.info("\targs.type: %s" % (args.type))
    logger.info("\targs.env: %s" % (args.env))
    logger.info("\targs.volume: %s" % (args.volume))
    logger.info("\targs.instance: %s" % (args.instance))
    logger.info("\targs.retention: %s" % (args.retention))
    logger.info("\targs.dry_run: %s" % (args.dry_run))
    logger.info("\targs.region: %s" % (args.region))
    logger.info("\targs.account_id: %s" % (args.account_id))
    logger.info("\targs.rotation: %s" % (args.rotation))
    logger.info("\targs.hourly: %s" % (args.hourly))
    logger.info("\targs.persist: %s" % (args.persist))
    logger.info("\targs.method: %s" % (args.method))
    logger.info("\targs.include_ami: %s" % (args.include_ami))
    logger.info("")

    Instance(ec2_client, args.dry_run).find(args.env, '')
    Volume(ec2_client, args.dry_run).find(cloudwatch_client, args.instance,
                                          args.volume, args.hourly,
                                          args.persist)

    # BUG FIX: `x != a or x != b` is always True; the intent was "skip when in
    # create-snapshot mode", i.e. membership tests.
    if args.type not in ("create-snapshot", "create-snapshots"):
        Snapshot(ec2_client, args.dry_run).find(args.account_id, args.env,
                                                args.method)
    if not args.volume and not args.instance:
        # BUG FIX: same always-True `!= ... or ...` chain replaced by `not in`.
        if args.type not in ("clean-snapshot", "clean-snapshots",
                             "clean-volume", "clean-volumes"):
            Image(ec2_client, args.dry_run).find(args.env, args.account_id)

    if args.type in ("all", "clean-snapshot", "clean-snapshots", "clean"):
        snapshot_count = 0
        logger.info("\n\n")
        logger.debug("Ignoring any env flag for cleanup: %s" % (args.env))
        logger.info("")
        logger.info("*** Cleaning Snapshots ***")
        logger.debug("\tsnapshot_data len: %i" % (len(Global.snapshot_data)))
        for snapshot in Global.snapshot_data:
            logger.info("Retrieved snapshot: %s" % (snapshot))
            data = Global.snapshot_data[snapshot]
            vol_counter = Global.volume_snapshot_count[data['volume_id']]
            if vol_counter['count'] > 0:
                logger.debug("")
                logger.debug("snapshot id: %s" % (data['id']))
                logger.debug("\tsnap_vol: %s" % (data['volume_id']))
                logger.debug("\tsnap_desc: %s" % (data['description']))
                logger.debug("\tsnap_date: %s" % (data['date']))
                logger.debug("\tsnap_ratio: %s" % (data['ratio']))
                logger.debug("\tsnap_age: %s" % (data['age']))
                logger.debug("\tsnap_persist: %s" % (data['persist']))
                logger.debug("\tsnap_method: %s" % (data['method']))
                logger.debug("\tsnap_count: %s" % (data['snap_count']))
                logger.debug("\tvolume_snapshot_count: %s" %
                             (vol_counter['count']))
                logger.debug("\trotation_scheme: %i" % (args.rotation))
                logger.debug(
                    "\tDeleting %s - [ snap_count:%s, volume_count:%s, persist: %s ] [ vol: %s ]"
                    % (data['id'], data['snap_count'], vol_counter['count'],
                       data['persist'], data['volume_id']))
                logger.debug(
                    "\tvol: %s snap: %s snap_count: %s rotate: %i" %
                    (data['volume_id'], data['id'], vol_counter['count'],
                     args.rotation))
                # The two original branches were identical except for the
                # delete method: orphaned snapshots (volume gone) use the
                # default path, snapshots of live volumes use 'delete_snapshot'.
                if data['volume_id'] not in Global.all_volumes:
                    delete_method = ''
                else:
                    delete_method = 'delete_snapshot'
                ret_val = Snapshot(ec2_client, args.dry_run).delete(
                    data['id'], delete_method)
                snapshot_count += ret_val
                vol_counter['count'] -= ret_val
            else:
                # logger.warn is a deprecated alias for logger.warning.
                logger.warning("")
                logger.warning(
                    "\tIgnoring deletion of %s - [ snap_count:%s, volume_count:%s, persist: %s ]"
                    % (data['id'], data['snap_count'], vol_counter['count'],
                       data['persist']))
        logger.info(" *** Total Snapshots Deleted: %s" % (snapshot_count))

    if args.type in ("all", "clean-volume", "clean-volumes", "clean"):
        volume_count = 0
        logger.info("\n\n")
        logger.debug("Ignoring any env flag for cleanup: %s" % (args.env))
        logger.info("*** Cleaning Volumes ***")
        logger.info(
            "*** Note: this tags items with tag { 'Delete': 'True' } ***\n")
        for volume in Global.volume_data:
            logger.info("Retrieved Volume: %s" % (volume))
            volume_count += 1
            logger.debug("")
            logger.debug("volume_id: %s" % (Global.volume_data[volume]['id']))
            logger.debug("\tvolume_instance_id: %s" %
                         (Global.volume_data[volume]['instance_id']))
            logger.debug("\tvolume_date: %s" %
                         (Global.volume_data[volume]['date']))
        logger.info(" *** Total Volumes To Delete: %s" % (volume_count))

    if args.type in ("all", "clean-ami", "clean", "clean-images"):
        image_count = 0
        logger.info("\n\n")
        logger.debug("Ignoring any env flag for cleanup: %s" % (args.env))
        logger.info("*** Cleaning Images ***")
        logger.info("Images found: %i" % (len(Global.image_data)))
        for image in Global.image_data:
            image_count += 1
            info = Global.image_data[image]
            logger.debug("")
            logger.debug("ami_id: %s" % (info['id']))
            logger.debug("\tami_name: %s" % (info['name']))
            logger.debug("\tami_attachment_id: %s" % (info['date']))
            logger.debug("\tami_snapshot_id: %s" % (info['snapshot_id']))
            logger.debug("\tami_persist: %s" % (info['persist']))
            logger.debug("\tami_build_method: %s" % (info['build_method']))
            # AMI deregistration stays gated behind --include-ami until
            # auto-deleting AMIs is trusted.
            if args.include_ami:
                if info['persist'] != "True":
                    logger.info("Deregistering AMI: %s" % (info['name']))
                    for ami_snapshot in info['snapshot_id']:
                        logger.info("\t deleting snapshot: %s" %
                                    (ami_snapshot))
                        Snapshot(ec2_client, args.dry_run).delete(
                            ami_snapshot, 'delete_snapshot')
                    logger.info("\t deleting image: %s" % (info['id']))
                    Image(ec2_client, args.dry_run).delete(
                        info['id'], info['name'])
        logger.info(" *** Total Images Deregistered: %s" % (image_count))

    if args.type in ("all", "create-snapshot", "create-snapshots"):
        snapshot_count = 0
        logger.info("\n\n")
        logger.info("*** Creating Snapshots ***")
        for s_volume in Global.snapshot_volumes:
            sv = Global.snapshot_volumes[s_volume]
            logger.debug("")
            logger.debug("\tsnapshot_volume['volume_id']: %s" % (sv['id']))
            logger.debug("\tsnapshot_volume['instance_id']: %s" %
                         (sv['instance_id']))
            logger.debug("\tsnapshot_volume['date']: %s" % (sv['date']))
            logger.debug("\tsnapshot_volume['desc']: %s" % (sv['desc']))
            logger.debug("\tsnapshot_volume['old_desc']: %s" %
                         (sv['old_desc']))
            logger.debug("\tsnapshot_volume['persist']: %s" % (sv['persist']))
            logger.debug("\tsnapshot_volume['hourly']: %s" % (sv['hourly']))
            snapshot_count += Snapshot(ec2_client, args.dry_run).create(
                args.region, sv['id'], sv['desc'], sv['old_desc'],
                sv['persist'])
        logger.info(" *** Total Volumes to Snapshot: %s" % (snapshot_count))

    return True