def attach_new_storage_to_current_instance(self):
    # Create a 1 GiB EBS volume, wait until it becomes available, then attach it to this instance.
    inst = self.get_instance_object_by_instance_id(self.__current_instance_name)
    vol = self.__conn.create_volume(1, self.__conn.region)
    time.sleep(30)
    curr_vol = self.__conn.get_all_volumes([vol.id])[0]
    while curr_vol.status != 'available':
        time.sleep(10)
        Logger.log("info", "Waiting for the volume to become available")
        curr_vol.update()  # refresh the status so the loop can terminate
    self.__conn.attach_volume(vol.id, inst.id, "/dev/sdf")
    Logger.log("info", "The volume {} attached to this instance".format(vol.id))
def get_parameter(self, scope, param):
    # Return the text of <scope>/<param> from the parsed configuration tree.
    if self.cfg_tree is not None:
        try:
            scope_node = self.cfg_tree.find(scope)
            return scope_node.find(param).text
        except Exception:
            error_message = "Cannot find the configuration for the scope {} and parameter {}".format(scope, param)
            Logger.log("error", error_message)
            raise GenericException(error_message)
    return ""
def create_dynamic_role(self):
    # Create a uniquely named IAM role and instance profile, and attach a policy scoped to the deployment prefix.
    random_id = uuid.uuid4().get_hex()
    with open(self.__iam_basic_policy_path, "r") as policy_file:
        iam_role_name = "{}-{}".format(self.__prefix_name, random_id)
        iam_policy_document = policy_file.read().replace("BUCKETNAME", "{}*".format(self.__prefix_name))
    self.__conn.create_role(iam_role_name)
    self.__conn.create_instance_profile(iam_role_name)
    self.__conn.add_role_to_instance_profile(iam_role_name, iam_role_name)
    self.__conn.put_role_policy(iam_role_name, self.__iam_policy_name, iam_policy_document)
    Logger.log("info", "Created a dynamic role named {}".format(iam_role_name))
    return iam_role_name
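
# A minimal sketch (an assumption, not the project's actual file) of what the JSON template at
# self.__iam_basic_policy_path could look like. create_dynamic_role() replaces the BUCKETNAME
# placeholder with "<prefix>*" before calling put_role_policy, so the role ends up scoped to
# buckets and objects that share the deployment prefix.
EXAMPLE_IAM_BASIC_POLICY = """
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": ["s3:GetObject", "s3:PutObject", "s3:ListBucket"],
            "Resource": "arn:aws:s3:::BUCKETNAME"
        }
    ]
}
"""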
def generate_and_store_encryption_key(self):
    # Create a Referer-protected bucket, generate an encryption key and upload it as the "key" object.
    from boto.s3.key import Key
    bucket = self.__s3.create_bucket(self.__bucket_unique_id)
    bucket.set_policy(self.__get_bucket_policy())  # set_policy expects the policy document, not the bound method
    key_object = Key(bucket)
    key_object.key = "key"
    encryption_key = self.__generate_encryption_key()
    key_object.set_contents_from_string(encryption_key, {"Referer": self.__get_referer_unique_id()}, True)
    expires_in_seconds = 1800
    key_object.generate_url(expires_in_seconds)
    Logger.log("info", "Encryption key uploaded to S3 bucket named {}".format(self.__bucket_unique_id))
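
# A sketch of how the private helper self.__get_bucket_policy could be implemented (the real
# helper is not shown in this module, so the structure below is an assumption). It denies
# GetObject requests whose Referer header does not carry the secret unique id, which is what
# makes the {"Referer": ...} headers on upload and download meaningful.
def __get_bucket_policy(self):
    import json
    policy = {
        "Version": "2012-10-17",
        "Statement": [
            {
                "Effect": "Deny",
                "Principal": "*",
                "Action": "s3:GetObject",
                "Resource": "arn:aws:s3:::{}/*".format(self.__bucket_unique_id),
                "Condition": {
                    "StringNotEquals": {"aws:Referer": self.__get_referer_unique_id()}
                }
            }
        ]
    }
    return json.dumps(policy)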
def __init__(self):
    # Parse the XML configuration file; fail loudly if it cannot be loaded.
    import xml.etree.ElementTree as ET
    path = self.__get_path()
    try:
        self.cfg_tree = ET.parse(path)
    except Exception:
        error_message = "Cannot find the configuration file {}".format(path)
        Logger.log("error", error_message)
        raise GenericException(error_message)
def get_instance_credentials():
    # Fetch temporary credentials for the instance's IAM role from the EC2 metadata service.
    creds_url = ""
    try:
        role_name = EnvironmentVariables.get_current_instance_profile()
        creds_url = "http://169.254.169.254/latest/meta-data/iam/security-credentials/{}".format(role_name)
        response = urllib2.urlopen(creds_url).read()
        parsed_response = json.loads(response)
        boto_cfg_access_key_value = parsed_response["AccessKeyId"]
        boto_cfg_secret_key_value = parsed_response["SecretAccessKey"]
        boto_cfg_token = parsed_response["Token"]
        return "{} {} {}".format(boto_cfg_access_key_value, boto_cfg_secret_key_value, boto_cfg_token)
    except Exception:
        Logger.log("error", "Cannot get instance credentials from {}".format(creds_url))
        raise
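
# A minimal usage sketch (an assumption, not part of the original module) showing how the
# space-separated string returned by get_instance_credentials() could be consumed with boto 2.
import boto

access_key, secret_key, token = get_instance_credentials().split(" ")
conn = boto.connect_ec2(aws_access_key_id=access_key,
                        aws_secret_access_key=secret_key,
                        security_token=token)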
def __get_file_contents_list_from_bucket(bucket, prefix, bucket_name):
    # Collect the contents of every unprocessed gzipped JSON object under the given prefix,
    # renaming each processed key with a "-done" suffix so it is not read twice.
    json_files_list = []
    for key in bucket.list(prefix=prefix):
        if key.name.endswith('/') or key.name.endswith('-done'):
            continue
        try:
            new_key_name = "{}-done".format(key.name)
            bucket.copy_key(new_key_name=new_key_name, src_bucket_name=bucket_name, src_key_name=key.name)
            bucket.delete_key(key.name)
            new_key = bucket.get_key(new_key_name)
            new_key.get_contents_to_filename(filename="tmp.json.gz")
            with gzip.open("tmp.json.gz", "rb") as f:
                json_files_list.append(f.read())
        except Exception as ex:
            Logger.log("warning", "{} FAILED: {}".format(key.name, ex.message))
    return json_files_list
def create_secure_instance(self, image_id, instance_type, instance_name):
    # Launch an instance in the production subnet with a freshly created dynamic IAM role
    # and the remediation security group, then tag it with the requested name.
    script_path = self.__cfg.get_parameter("Instances", "CloudInitScriptPath")
    remediation_security_group_id = self.__cfg.get_parameter("Instances", "RemediationSecurityGroupId")
    production_subnet_id = self.__cfg.get_parameter("Instances", "ProductionSubnetId")
    key_name = self.__cfg.get_parameter("Instances", "EC2KeyName")
    with open(script_path, "r") as script_file:
        cloud_init_script = script_file.read()
    iam_role = IAMAdmin()
    instance_profile = iam_role.create_dynamic_role()
    new_reservation = self.__try_create_instance(image_id, key_name, instance_profile, instance_type,
                                                 production_subnet_id, remediation_security_group_id,
                                                 cloud_init_script)
    instance = new_reservation.instances[0]
    self.__conn.create_tags([instance.id], {"Name": instance_name})
    message = "An instance was created with id {}".format(instance.id)
    Logger.log("info", message)
    return message
def __try_create_instance(self, ami_id, key_name, profile_name, instance_type, subnet_id, security_group_id, user_data):
    # Try to launch the instance; a newly created instance profile may not be visible to EC2
    # right away, so wait and retry once before giving up.
    try:
        return self.__conn.run_instances(ami_id, key_name=key_name, instance_profile_name=profile_name,
                                         instance_type=instance_type, subnet_id=subnet_id,
                                         security_group_ids=[security_group_id], user_data=user_data)
    except Exception:
        Logger.log("warning", "Could not create the instance on the first attempt. Waiting a few seconds before retrying")
        time.sleep(30)
        Logger.log("warning", "Retrying to create the instance")
        try:
            return self.__conn.run_instances(ami_id, key_name=key_name, instance_profile_name=profile_name,
                                             instance_type=instance_type, subnet_id=subnet_id,
                                             security_group_ids=[security_group_id], user_data=user_data)
        except Exception as ex:
            message = "Cannot create a new instance: {}".format(ex.message)
            raise GenericException(message)
def verify_management(self):
    node = Node(self.__get_hostname())
    if node.exists:
        Logger.log("info", "The server is managed by Chef")
        return
    raise RemediationException("The server is not managed by Chef. Please make sure it is managed before promoting it to production.")
def __init__(self, message):
    super(RemediationException, self).__init__(message)
    Logger.log("critical", message)
def __init__(self, message):
    super(GenericException, self).__init__(message)
    Logger.log("error", message)
def strict_current_instance_role_permissions(self):
    iam = IAM()
    current_role_name = EnvironmentVariables.get_current_instance_profile()
    iam.strict_dynamic_role(current_role_name)
    Logger.log("info", "Changed the IAM role to be more strict")
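
# IAM.strict_dynamic_role() is defined elsewhere in the project; the sketch below is an
# assumption about the technique, not the original implementation. One common approach is to
# overwrite the role's inline policy with a narrower document (e.g. read-only access to the
# prefixed buckets) once the instance no longer needs its setup permissions. The attribute
# names self.__conn, self.__iam_policy_name and self.__prefix_name mirror the ones used in
# create_dynamic_role() above.
def strict_dynamic_role(self, role_name):
    strict_policy_document = """{
        "Version": "2012-10-17",
        "Statement": [
            {
                "Effect": "Allow",
                "Action": "s3:GetObject",
                "Resource": "arn:aws:s3:::%s*"
            }
        ]
    }""" % self.__prefix_name
    # put_role_policy replaces the named inline policy on the role with the stricter document.
    self.__conn.put_role_policy(role_name, self.__iam_policy_name, strict_policy_document)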
def move_current_instance_to_production_group(self):
    # Switch this instance's security group to the production security group.
    production_group_id = self.__cfg.get_parameter("Instances", "ProductionSecurityGroupId")
    self.__conn.modify_instance_attribute(self.__current_instance_name, "groupSet", [production_group_id])
    Logger.log("info", "This instance was moved to the production security group {}".format(production_group_id))
def get_encryption_key(self):
    # Download the encryption key from the Referer-protected bucket.
    bucket = self.__s3.get_bucket(self.__bucket_unique_id)
    key = bucket.get_key("key", headers={"Referer": self.__get_referer_unique_id()})
    response = key.get_contents_as_string(headers={"Referer": self.__get_referer_unique_id()})
    Logger.log("info", "Encryption key downloaded")
    return response
__author__ = 'nirv'

from Chef.ConfigurationManagement import ChefClient
from NessusScanner.VulnerabilityAssessment import Scanner
from CloudServices.Common.Exceptions import RemediationException, GenericException
from CloudServices.IaaS.Instances import EC2Instance
from CloudServices.Common.Logger import Logger

ec2 = EC2Instance()
try:
    chef_client = ChefClient()
    chef_client.verify_management()
    nessus = Scanner()
    nessus.run_scan()
    ec2.move_current_instance_to_production_group()
    ec2.strict_current_instance_role_permissions()
except RemediationException:
    # ec2.strict_current_instance_role_permissions()
    # Depending on the business requirements, the role can still be restricted here.
    exit()
except GenericException:
    exit()
except Exception as ex:
    Logger.log("error", ex.message)
    exit()