def set_from_dict(self, params):
    """Populate this object's attributes from a request-parameter dict.

    Verifies that every required scalar key and every required list-key
    prefix is present, expands list parameters via _do_list_param, then
    converts and attaches the scalar values as attributes on self.

    :param params: dict of raw request parameters (name -> string value)
    :raises PhantomAWSException: 'MissingParameter' when a required
        parameter (or required list-parameter prefix) is absent.
    """
    # required scalar parameters must be present by exact name
    for p in self.needed_param_keys:
        if p not in params:
            raise PhantomAWSException(
                'MissingParameter', details="parameter %s missing" % (p))

    # required list parameters only need some key starting with the prefix
    # (idiom fix: p.find(pl) == 0 is just p.startswith(pl))
    for pl in self.needed_param_list_keys:
        if not any(p.startswith(pl) for p in params):
            raise PhantomAWSException(
                'MissingParameter', details="parameter %s missing" % (pl))

    # expand every incoming parameter into the list-typed members
    for p in params:
        self._do_list_param(params, p, self.needed_param_list_keys)
        self._do_list_param(params, p, self.optional_param_list_keys)

    # setup the needed ones
    for p in self.needed_param_keys:
        if p in params:
            val = self._get_value(p, params[p], self.needed_param_keys[p])
            if val is not None:
                self.__dict__[p] = val

    # setup the optional ones
    for p in self.optional_param_keys:
        if p in params:
            val = self._get_value(p, params[p], self.optional_param_keys[p])
            if val is not None:
                self.__dict__[p] = val
def get_user_object_by_access_id(self, access_id):
    """Look up a user by access key id in the cumulus DB.

    Get a new connection every time this is called to make sure it is
    cleaned up.

    :raises PhantomAWSException: 'InvalidClientTokenId' when no alias
        matches the given access_id.
    """
    db = DB(self._cumulus_db)
    try:
        user_alias = User.find_alias(db, access_id)
        if not user_alias:
            raise PhantomAWSException('InvalidClientTokenId')
        matches = list(user_alias)
    finally:
        # bug fix: the original only closed the connection on the success
        # path; close it on the error paths too
        db.close()
    # bug fix: the original tested "l < 1", comparing the list itself to an
    # int (always False on python 2, TypeError on python 3)
    if len(matches) < 1:
        raise PhantomAWSException('InvalidClientTokenId')
    return PhantomUserObject(access_id, matches[0].get_data(),
                             matches[0].get_friendly_name())
def create_launch_config(self, user_obj, lc):
    """Create or extend a DT definition from a launch configuration.

    The LaunchConfigurationName encodes "<dt name>@<site name>".  If the
    DT already exists for this user the new site mapping is added to it,
    otherwise a fresh DT definition is created.

    :raises PhantomAWSException: 'InvalidParameterValue' when the site
        mapping already exists or the UserData is not valid base64.
    """
    (dt_name, site_name) = _breakup_name(lc.LaunchConfigurationName)

    # see if that name already exists
    dt_def = None
    exists = self._check_dt_name_exists(dt_name, user_obj.access_id)
    if exists:
        dt_def = self._get_dt_details(dt_name, user_obj.access_id)
    if not dt_def:
        dt_def = {}
        dt_def['mappings'] = {}
    if site_name in dt_def['mappings']:
        raise PhantomAWSException('InvalidParameterValue',
                                  details="Name already in use")

    mapping = {}
    dt_def['mappings'][site_name] = mapping
    # values needed by the system
    mapping['iaas_allocation'] = lc.InstanceType
    mapping['iaas_image'] = lc.ImageId
    mapping['key_name'] = phantom_get_default_key_name()
    # user defined values
    mapping['CreatedTime'] = make_time(lc.CreatedTime.date_time)
    #mapping['BlockDeviceMappings'] = lc.BlockDeviceMappings
    mapping['InstanceMonitoring'] = lc.InstanceMonitoring.Enabled
    mapping['KernelId'] = lc.KernelId
    mapping['RamdiskId'] = lc.RamdiskId
    mapping['RequestedKeyName'] = lc.KeyName
    mapping['LaunchConfigurationARN'] = lc.LaunchConfigurationARN

    if lc.UserData:
        try:
            lc.UserData = base64.b64decode(lc.UserData)
        except Exception:
            # bug fix: a bare except here also swallowed SystemExit and
            # KeyboardInterrupt; only decode failures should map to an
            # InvalidParameterValue
            raise PhantomAWSException(
                'InvalidParameterValue',
                details="UserData should be base64-encoded")
        dt_def['contextualization'] = {}
        dt_def['contextualization']['method'] = 'userdata'
        dt_def['contextualization']['userdata'] = lc.UserData

    if exists:
        self._dtrs_client.update_dt(user_obj.access_id, dt_name, dt_def)
    else:
        self._dtrs_client.add_dt(user_obj.access_id, dt_name, dt_def)
def get_user_object_by_display_name(self, display_name):
    """Look up a user record by its display name.

    :raises PhantomAWSException: 'InvalidClientTokenId' when no user has
        the given display name, 'InternalFailure' on a DB error.
    """
    try:
        q = self._session.query(PhantomUserDBObject)
        q = q.filter(PhantomUserDBObject.displayname == display_name)
        db_obj = q.first()
        if not db_obj:
            raise PhantomAWSException('InvalidClientTokenId')
        return PhantomUserObject(db_obj.access_key, db_obj.access_secret,
                                 db_obj.displayname)
    except sqlalchemy.exc.SQLAlchemyError as ex:
        # py3-compatible except syntax (works on python >= 2.6)
        log(
            logging.ERROR,
            "A database error occurred while trying to access the user db %s"
            % (str(ex)))
        raise PhantomAWSException('InternalFailure')
def _create_autoscale_group(self, user_obj, asg):
    """Validate and build the DB objects for a new autoscale group.

    :returns: tuple of (AutoscaleGroupObject, launch-config db object)
    :raises PhantomAWSException: 'InvalidParameterValue' when the group
        name is taken, the launch configuration is unknown, or no
        availability zone was given.
    """
    db_asg = self._db.get_asg(user_obj, asg.AutoScalingGroupName)
    if db_asg:
        raise PhantomAWSException(
            'InvalidParameterValue',
            details="The name %s already exists" % (asg.AutoScalingGroupName))
    db_lco = self._db.get_lcs(
        user_obj, [asg.LaunchConfigurationName], max=1, log=self._log)
    if not db_lco:
        raise PhantomAWSException(
            'InvalidParameterValue',
            details="The launch configuration name doesn't exist %s"
            % (asg.LaunchConfigurationName))
    if len(asg.AvailabilityZones.type_list) < 1:
        # consistency fix: pass the message via details= like every other
        # PhantomAWSException call site in this module
        raise PhantomAWSException('InvalidParameterValue',
                                  details='An AZ must be specified')
    db_asg = AutoscaleGroupObject()
    db_asg.set_from_outtype(asg, user_obj)
    return (db_asg, db_lco[0])
def set_value(self, name, value):
    """Set one named parameter, converting it to its declared type.

    List-typed and unknown names are rejected, as are declared types the
    converter cannot treat as primitives.
    """
    scalar_maps = (self.optional_param_keys, self.needed_param_keys)
    list_maps = (self.optional_param_list_keys, self.needed_param_list_keys)

    t = None
    for m in scalar_maps:
        if name in m:
            t = m[name]
            break
    else:
        # not a scalar: distinguish list params from completely unknown ones
        for m in list_maps:
            if name in m:
                raise PhantomAWSException("We cannot work with this type yet")
        raise PhantomAWSException("%s is not a known value name" % (name))

    if not phantom_is_primative(t):
        raise PhantomAWSException("We cannot work with this type yet")
    self.__setattr__(name, t(value))
def get_user_object_by_access_id(self, access_id):
    """Return the PhantomUserObject for the given access key id.

    :raises PhantomAWSException: 'InvalidClientTokenId' when unknown.
    """
    record = self._lookup_user(access_id)
    if not record:
        raise PhantomAWSException('InvalidClientTokenId')
    return PhantomUserObject(
        access_id, record.access_secret, record.displayname)
def _get_aws_access_key_method_two(req):
    """Parse signature-v4 style credentials out of the Authorization header.

    Expected header form:
        <method>-<version> Credential=...,SignedHeaders=...,Signature=...

    :returns: dict with AWSAccessKeyId, CredentialScope, Credential,
        Signature, SignedHeaders, SignatureMethod, SignatureVersion and a
        Timestamp/Expires entry.
    :raises PhantomAWSException: when a required Authorization component
        is missing.
    """
    auth_string_a = req.headers['Authorization'].split()
    metha = auth_string_a[0].split('-', 1)
    signature_method = metha[0]
    tokens = auth_string_a[1].split(',')
    res = {'Credential': None,
           'Signature': None,
           'SignedHeaders': None,
           'SignatureMethod': signature_method,
           'SignatureVersion': metha[1]}
    # each token looks like "<Key>=<value>"; snapshot the keys so the dict
    # is never mutated while being iterated
    for t in tokens:
        for k in list(res.keys()):
            if t.startswith(k):
                res[k] = t[len(k) + 1:]

    found = False
    optional_keys = ['Expires', 'Timestamp']
    for k in optional_keys:
        if k in req.params.keys():
            res[k] = req.params[k]
            found = True
    if not found:
        # fall back to the X-Amz-Date header, normalized to ISO-8601
        dt_str = req.headers['X-Amz-Date'].replace('Z', 'UTC')
        dt = datetime.datetime.strptime(dt_str, '%Y%m%dT%H%M%S%Z')
        res['Timestamp'] = dt.strftime('%Y-%m-%dT%H:%M:%SZ')

    for k in list(res.keys()):
        if res[k] is None:
            # bug fix: the exception was constructed but never raised, and
            # the key name was never interpolated into the message
            raise PhantomAWSException(
                "The key %s is required in the Authorization parameter." % (k))

    a = res['Credential'].split('/', 1)
    res['AWSAccessKeyId'] = a[0]
    res['CredentialScope'] = a[1]
    return res
def __call__(self, req):
    """Main router entry point: authenticate the caller and build the
    handler application for the requested AWS Action.

    :param req: the incoming webob request
    :raises PhantomAWSException: 'InvalidParameterValue' when no Action
        parameter is present.
    :raises webob.exc.HTTPNotFound: for an unknown Action name.
    """
    # NOTE(review): 'before' and 'app' are unused in the visible body --
    # this looks like the front half of a longer handler; left in place
    before = time.time()
    user_obj = None
    request_id = str(uuid.uuid4())
    try:
        log(logging.INFO, "%s Enter main router | %s"
            % (request_id, str(req.params)))
        authz = self._cfg.get_authz()
        # identify and authenticate the caller
        access_dict = get_aws_access_key(req)
        user_obj = authz.get_user_object_by_access_id(
            access_dict['AWSAccessKeyId'])
        authenticate_user(user_obj.secret_key, req, access_dict)

        key = 'Action'
        if key not in req.params.keys():
            raise PhantomAWSException('InvalidParameterValue')
        action = req.params['Action']
        # map the AWS Action name onto the handler application class
        global _action_to_application_map
        if action not in _action_to_application_map:
            raise webob.exc.HTTPNotFound("No action %s" % action)
        app_cls = _action_to_application_map[action]
        log(logging.INFO, "%s Getting phantom action %s"
            % (request_id, action))
        app = app_cls(action, cfg=self._cfg)
    except Exception as ex:
        # py3-compatible except syntax; log then propagate to the outer
        # error handler
        log(logging.ERROR, "%s Exiting main router with error %s"
            % (request_id, str(ex)))
        raise
def delete_autoscale_group(self, user_obj, name, force):
    """Remove the named autoscale group record and commit the change.

    :raises PhantomAWSException: 'InvalidParameterValue' when no group
        with that name exists for this user.
    """
    group = self._db.get_asg(user_obj, name)
    if not group:
        msg = "The name %s does not exists" % (name)
        raise PhantomAWSException('InvalidParameterValue', details=msg)
    self._db.delete_asg(group)
    self._db.db_commit()
def alter_autoscale_group(self, user_obj, name, new_conf, force):
    """Update the desired capacity of an existing autoscale group.

    :raises PhantomAWSException: 'InvalidParameterValue' when the group
        is not found.
    """
    asg = self._db.get_asg(user_obj, name)
    if not asg:
        # bug fix: the original formatted asg.AutoScalingGroupName here,
        # but asg is None on this path (AttributeError); use the
        # requested name instead
        raise PhantomAWSException(
            'InvalidParameterValue',
            details="The name %s does not exists" % (name))
    asg.DesiredCapacity = new_conf['desired_capacity']
    self._db.db_commit()
def delete_launch_config(self, user_obj, name):
    """Delete one site mapping from a DT; remove the DT when it empties.

    :raises PhantomAWSException: 'InvalidParameterValue' when the DT or
        the site mapping is unknown.
    """
    (dt_name, site_name) = _breakup_name(name)
    dt_def = self._get_dt_details(dt_name, user_obj.access_id)
    if not dt_def or site_name not in dt_def['mappings']:
        raise PhantomAWSException('InvalidParameterValue',
                                  details="Name %s not found" % (name))
    del dt_def['mappings'][site_name]
    # keep the DT alive only while it still maps at least one site
    if dt_def['mappings']:
        self._dtrs_client.update_dt(user_obj.access_id, dt_name, dt_def)
    else:
        self._dtrs_client.remove_dt(user_obj.access_id, dt_name)
def create_launch_config(self, user_obj, lc):
    """Persist a new launch configuration in the local DB.

    :raises PhantomAWSException: 'InvalidParameterValue' when the name is
        already taken (unique-constraint violation).
    """
    try:
        lco = LaunchConfigurationObject()
        lco.set_from_outtype(lc, user_obj)
        self._db.db_obj_add(lco)
        self._db.db_commit()
    except IntegrityError as ie:
        # py3-compatible except syntax (works on python >= 2.6)
        self._log.error("DB error %s" % (str(ie)))
        raise PhantomAWSException('InvalidParameterValue',
                                  details="Name already in use")
def _breakup_name(name):
    """Split "<dt name>@<site name>" into its two components.

    :returns: the two-element split result [dt_name, site_name]
    :raises PhantomAWSException: 'InvalidParameterValue' when the input
        does not contain the '@' separator.
    """
    parts = name.split("@", 1)
    if len(parts) == 2:
        return parts
    raise PhantomAWSException(
        'InvalidParameterValue',
        details="The name %s is not in the proper format. It must be "
                "<dt name>@<site name>" % (name))
def get_user_object_by_access_id(self, access_id):
    """Find a user in the flat security file by access key.

    Get a new connection every time this is called to make sure it is
    cleaned up.  Each line of the file is "<access> <secret> <display>".

    :raises PhantomAWSException: 'InternalFailure' on a malformed line,
        'InvalidClientTokenId' when the access key is not present.
    """
    # idiom fix: a with-statement replaces the manual try/finally close
    with open(self._filepath, "r") as fptr:
        for line in fptr:
            la = line.split()
            if len(la) != 3:
                raise PhantomAWSException(
                    "InternalFailure",
                    details="Invalid security file %s" % (self._filepath))
            (access_key, secret_key, display_name) = la
            if access_key == access_id:
                return PhantomUserObject(access_id, secret_key, display_name)
        raise PhantomAWSException('InvalidClientTokenId')
def call(sqlobj, *args, **kwargs):
    """Open the DB, run the wrapped function, and map SQL errors.

    Wraps *func* (from the enclosing decorator scope) so that any
    SQLAlchemy error is logged and surfaced as an 'InternalFailure'.
    """
    sqlobj._open_dbobj()
    try:
        return func(sqlobj, *args, **kwargs)
    except sqlalchemy.exc.SQLAlchemyError as ex:
        # py3-compatible except syntax (works on python >= 2.6)
        log(
            logging.ERROR,
            "A database error occurred while trying to access the user db %s"
            % (str(ex)))
        raise PhantomAWSException('InternalFailure', ex)
def tags_to_definition(Tags):
    """Convert an AWS Tags list into (definition_name, parameter_doc).

    The tag named by g_definition_key_name selects the definition
    template; every other tag becomes a typed parameter of it.

    :raises PhantomAWSException: 'InvalidParameterValue' for an unknown
        definition, an unknown parameter, or an unconvertible value.
    """
    # this first loop simply eliminates the need to have the definition be the first entry
    definition_name = g_default_definition
    parameters = {}
    for tag in Tags:
        if tag.Key == g_definition_key_name:
            definition_name = tag.Value
        else:
            parameters[tag.Key] = tag.Value

    # this next call may later be replaced by a call to the epu system
    if definition_name not in g_known_templates.keys():
        raise PhantomAWSException(
            'InvalidParameterValue',
            details="%s is an unknown definition type. Please check your tags"
            % (definition_name))
    def_template = g_known_templates[definition_name]

    # second pass: type-convert every parameter against the template
    result_doc = {}
    for key, raw in parameters.items():
        if key not in def_template:
            raise PhantomAWSException(
                'InvalidParameterValue',
                details=
                "%s does not take a parameter of %s. Please check your tags"
                % (definition_name, key))
        try:
            result_doc[key] = def_template[key](raw)
        except PhantomAWSException:
            raise
        except Exception:
            raise PhantomAWSException(
                'InvalidParameterValue',
                details=
                "The tag %s has a value %s that could not be understood. Please check the type"
                % (key, raw))
    return (definition_name, result_doc)
def get_aws_access_key(req):
    """Pick the authentication scheme based on what the request carries.

    A query-string AWSAccessKeyId selects method one; an Authorization
    header selects method two.

    :raises PhantomAWSException: when neither credential form is present.
    """
    handlers = (
        ('AWSAccessKeyId', req.params,
         "AWSAccessKeyId is in the request parameter list using authentication method one",
         _get_aws_access_key_method_one),
        ('Authorization', req.headers,
         "Authorization is in the request parameter list using authentication method two",
         _get_aws_access_key_method_two),
    )
    for key, mapping, msg, handler in handlers:
        if key in mapping.keys():
            log(logging.INFO, msg)
            return handler(req)
    raise PhantomAWSException(
        'Unable to find the information needed to authenticate the user in the request')
def __call__(self, func):
    """Decorator: translate exceptions raised by a wsgi app method.

    HTTP exceptions are logged and returned as the response body;
    anything else is logged and converted to an AWS 'InternalFailure'.
    """
    def wrapped(wsgiapp, req, *args, **kw):
        try:
            #user_obj = wsgiapp.get_user_obj(req)
            return func(wsgiapp, req, *args, **kw)
        except webob.exc.HTTPException as httpde:
            # py3-compatible except syntax; func.__name__ replaces the
            # python2-only func.func_name attribute
            log(logging.INFO, "Application %s:%s received HTTP error %s"
                % (self._app_name, func.__name__, httpde), printstack=True)
            return httpde
        except Exception as ex:
            log(logging.ERROR, "Application %s:%s received Unknown error %s"
                % (self._app_name, func.__name__, ex), printstack=True)
            # convert to a http exception
            raise PhantomAWSException('InternalFailure', details=str(ex))
    # bug fix: the original never returned the wrapper, so every
    # decorated method was silently replaced with None
    return wrapped
def authenticate_user(secret_key, req, access_dict):
    """Verify the request signature and its timestamp/expiry window.

    The request must carry either an Expires time or a Timestamp, and
    neither may be more than 15 minutes in the future; a Timestamp grants
    a 15-minute validity window.

    :param secret_key: the caller's secret, falsy when the user is unknown
    :param req: the incoming request (used for signature calculation)
    :param access_dict: parsed access info (AWSAccessKeyId, Signature,
        and optionally Expires/Timestamp)
    :raises PhantomAWSException: InvalidClientTokenId, IncompleteSignature,
        RequestExpired or MissingParameter as appropriate.
    """
    access_id = access_dict['AWSAccessKeyId']
    signature = access_dict['Signature']
    if not secret_key:
        log(logging.WARN, "The user %s was not found." % (access_id))
        raise PhantomAWSException('InvalidClientTokenId')

    proper_signature = get_auth_hash(secret_key, req, access_dict)
    if signature != proper_signature:
        log(logging.WARN, "The signature for user %s was not correct." % (access_id))
        raise PhantomAWSException('IncompleteSignature')

    # check the time
    expr_time = None
    nw = datetime.datetime.utcnow()
    delta = datetime.timedelta(seconds=60 * 15)
    if 'Expires' in access_dict.keys():
        expr_time = _get_time(access_dict['Expires'])
        if expr_time > nw + delta:
            log(logging.WARN, "The request for user %s has an expiration time that is too far in the future." % (access_id))
            raise PhantomAWSException('RequestExpired')
    if not expr_time and 'Timestamp' in access_dict.keys():
        timestamp = _get_time(access_dict['Timestamp'])
        if timestamp > nw + delta:
            log(logging.WARN, "The request for user %s has a timestamp that is too far in the future." % (access_id))
            raise PhantomAWSException('RequestExpired')
        expr_time = timestamp + delta
    if not expr_time:
        log(logging.WARN, "The request for user %s neither a timestamp nor expiration time." % (access_id))
        raise PhantomAWSException('MissingParameter')
    if nw > expr_time:
        # bug fix: this log line previously claimed the timestamp was
        # missing; this branch means the request has expired
        log(logging.WARN, "The request for user %s has expired." % (access_id))
        raise PhantomAWSException('RequestExpired')
def __init__(self, CFG):
    """Wire up the authz backend, the system driver, and statsd from CFG.

    :param CFG: parsed configuration object with phantom.authz,
        phantom.system and (optionally) statsd sections
    :raises PhantomAWSException: 'InternalFailure' when an unknown authz
        or system type is configured.
    """
    self._CFG = CFG
    self._logger = logging.getLogger("phantom")

    # pick the authorization/data-store backend
    if self._CFG.phantom.authz.type == "simple_file":
        fname = self._CFG.phantom.authz.filename
        self._authz = SimpleFileDataStore(fname)
    elif self._CFG.phantom.authz.type == "cumulus":
        from pyhantom.authz.cumulus_sqlalch import CumulusDataStore
        dburl = self._CFG.phantom.authz.dburl
        self._authz = CumulusDataStore(dburl)
    elif self._CFG.phantom.authz.type == "sqldb":
        self._authz_sessionmaker = SimpleSQLSessionMaker(
            CFG.phantom.authz.dburl)
    else:
        raise PhantomAWSException(
            'InternalFailure',
            details="Phantom authz module is not setup.")

    # pick the system implementation
    if self._CFG.phantom.system.type == "tester":
        self._system = TestSystem()
    elif self._CFG.phantom.system.type == "localdb":
        self._system = SystemLocalDB(self._CFG, log=self._logger)
    elif self._CFG.phantom.system.type == "epu_localdb":
        self._system = EPUSystemWithLocalDB(self._CFG)
    elif self._CFG.phantom.system.type == "epu":
        self._system = EPUSystem(self._CFG)
    else:
        # NOTE(review): this message says "authz" but this is the system
        # branch -- looks like copy/paste; left unchanged to preserve
        # behavior
        raise PhantomAWSException(
            'InternalFailure',
            details="Phantom authz module is not setup.")

    # statsd is strictly optional: any failure here must not stop startup
    self.statsd_client = None
    try:
        if self._CFG.statsd is not None:
            host = self._CFG.statsd["host"]
            port = self._CFG.statsd["port"]
            self._logger.info(
                "Setting up statsd client with host %s and port %d"
                % (host, port))
            self.statsd_client = StatsClient(host, port)
    except AttributeError:
        # This means that there is not statsd block in the configuration
        pass
    except Exception:
        # bug fix: the bare except here also swallowed SystemExit and
        # KeyboardInterrupt during startup
        self._logger.exception("Failed to set up statsd client")
def alter_autoscale_group(self, user_obj, name, new_conf, force):
    """Reconfigure an EPU domain's preserve_n to the new desired capacity.

    :raises PhantomAWSException: 'InvalidParameterValue' when the group
        is unknown to the local DB.
    """
    self._clean_up_db()
    asg = self._db.get_asg(user_obj, name)
    if not asg:
        # bug fix: the original formatted asg.AutoScalingGroupName here,
        # but asg is None on this path (AttributeError); use the
        # requested name instead
        raise PhantomAWSException(
            'InvalidParameterValue',
            details="The name %s does not exists" % (name))
    conf = {'engine_conf': {'preserve_n': new_conf['desired_capacity']}, }
    try:
        self._epum_client.reconfigure_domain(name, conf)
    except Exception:
        # py3-compatible except syntax; propagate unchanged
        raise
def validate_cloud(cloud_string):
    """Validate a "<cloud site name>:<integer size>" string.

    :returns: {'site_name': ..., 'size': ...}; size is kept as the
        original string but is verified to parse as an integer
    :raises PhantomAWSException: 'InvalidParameterValue' on bad format.
    """
    # dead-code fix: rpartition always returns a 3-tuple, so the original
    # "len(la) != 3" check could never fire; the separator slot is the
    # real test for a missing ':'
    (site_name, sep, n_vms) = cloud_string.rpartition(":")
    if sep != ":":
        raise PhantomAWSException(
            'InvalidParameterValue',
            details=
            "The format is <cloud site name>:<integer size>. You sent %s"
            % (cloud_string))
    try:
        int(n_vms)
    except Exception:
        raise PhantomAWSException(
            'InvalidParameterValue',
            details=
            "The format is <cloud site name>:<integer size>. You sent %s"
            % (cloud_string))
    result_doc = {'site_name': site_name, 'size': n_vms}
    return result_doc
def alter_autoscale_group(self, user_obj, name, new_conf, force):
    """Set a group's desired capacity and spin up fake instances to match.

    Test-system implementation: every call creates DesiredCapacity new
    instances rather than reconciling with the existing count.

    :raises PhantomAWSException: 'InvalidParameterValue' for an unknown
        group name.
    """
    global g_instance_registry
    try:
        asg = self._asgs[name]
    except KeyError:
        raise PhantomAWSException('InvalidParameterValue', details=name)
    if 'desired_capacity' not in new_conf:
        return
    capacity = new_conf['desired_capacity']
    asg.DesiredCapacity = capacity
    # add instance up to that capacity
    for _ in range(capacity):
        self._make_new_instance(asg)
def create_autoscale_group(self, user_obj, asg):
    """Create an EPU domain for the autoscale group.

    Builds the engine configuration from the group's tags plus its launch
    configuration name ("<dt>@<site>") and registers the domain with the
    EPUM service.

    :raises PhantomAWSException: 'InvalidParameterValue' when the domain
        name is already in use.
    """
    log(
        logging.DEBUG, "entering create_autoscale_group with %s"
        % (asg.LaunchConfigurationName))

    (definition_name, domain_opts) = tags_to_definition(asg.Tags.type_list)

    dt_name = asg.LaunchConfigurationName
    site_name = ""
    if dt_name.find('@') > 0:
        (dt_name, site_name) = _breakup_name(asg.LaunchConfigurationName)

    # options common to every definition type (the original duplicated
    # these in both branches; it also pre-set minimum_vms from
    # DesiredCapacity, which both branches immediately overwrote)
    domain_opts['dtname'] = dt_name
    domain_opts['minimum_vms'] = asg.MinSize
    domain_opts['maximum_vms'] = asg.MaxSize
    domain_opts['opentsdb_host'] = self._opentsdb_host
    domain_opts['opentsdb_port'] = self._opentsdb_port
    if definition_name == 'sensor_engine':
        domain_opts['deployable_type'] = dt_name
        domain_opts['iaas_site'] = site_name
        domain_opts['iaas_allocation'] = "m1.small"
    #domain_opts['force_site'] = site_name

    domain_opts['CreatedTime'] = make_time(asg.CreatedTime.date_time)
    domain_opts['AutoScalingGroupARN'] = asg.AutoScalingGroupARN
    domain_opts['VPCZoneIdentifier'] = asg.VPCZoneIdentifier
    domain_opts['HealthCheckType'] = asg.HealthCheckType
    domain_opts['PlacementGroup'] = asg.PlacementGroup

    conf = {'engine_conf': domain_opts}
    log(logging.INFO, "Creating autoscale group with %s" % (conf))
    try:
        self._epum_client.add_domain(
            asg.AutoScalingGroupName, definition_name, conf,
            caller=user_obj.access_id)
    except DashiError as de:
        # py3-compatible except syntax
        if de.exc_type == u'WriteConflictError':
            raise PhantomAWSException(
                'InvalidParameterValue',
                details="auto scale name already exists")
        log(logging.ERROR, "An error creating ASG: %s" % (str(de)))
        raise
def terminate_instances(self, user_obj, instance_id, adjust_policy):
    """Terminate one fake instance, optionally shrinking the group.

    With adjust_policy the group's desired capacity drops by one;
    otherwise a replacement instance is created.

    :raises PhantomAWSException: 'InvalidParameterValue' for an unknown
        instance id.
    """
    global g_instance_registry
    try:
        inst = g_instance_registry[instance_id]
    except KeyError:
        raise PhantomAWSException('InvalidParameterValue',
                                  details=instance_id)
    group = self._asgs[inst.AutoScalingGroupName]
    group.Instances.type_list.remove(inst)
    if adjust_policy:
        group.DesiredCapacity -= 1
    else:
        self._make_new_instance(group)
    del g_instance_registry[instance_id]
def delete_autoscale_group(self, user_obj, name, force):
    """Remove the EPU domain backing the named autoscale group.

    :raises PhantomAWSException: 'InvalidParameterValue' when the group
        is not in the local DB.
    """
    self._clean_up_db()
    asg = self._db.get_asg(user_obj, name)
    if not asg:
        raise PhantomAWSException(
            'InvalidParameterValue',
            details="The name %s does not exists" % (name))
    try:
        # clean up instances
        # epu_desc = self._epum_client.describe_domain(name)
        # inst_list = epu_desc['instances']
        # for inst in inst_list:
        #     if 'iaas_id' in inst['iaas_id']:
        #         instance_id = inst['iaas_id']
        self._epum_client.remove_domain(name)
    except Exception:
        # py3-compatible except syntax; propagate unchanged
        raise
def get_autoscale_groups(self, user_obj, names=None, max=-1, startToken=None):
    """Describe autoscale groups, merging DB records with live EPU state.

    :returns: (asg_list_type, next_token) as produced by the local DB
        lookup, with each group's description refreshed from the EPUM
        service.
    :raises PhantomAWSException: 'InternalFailure' when the DB and the
        EPU service disagree about a group's existence.
    """
    self._clean_up_db()
    try:
        (asg_list_type, next_token) = SystemLocalDB.get_autoscale_groups(
            self, user_obj, names, max, startToken)
        epu_list = self._epum_client.list_domains()
        log(logging.DEBUG, "Incoming epu list is %s" % (str(epu_list)))
        # verify that the names are in the list
        for grp in asg_list_type.type_list:
            if grp.AutoScalingGroupName not in epu_list:
                # perhaps all we should do here is log the error and remove
                # the item from the DB; for now make it very obvious that
                # this happened
                raise PhantomAWSException(
                    'InternalFailure',
                    "%s is in the DB but the epu does not know about it"
                    % (grp.AutoScalingGroupName))
            epu_desc = self._epum_client.describe_domain(
                grp.AutoScalingGroupName)
            convert_epu_description_to_asg_out(epu_desc, grp)
    except Exception:
        # py3-compatible except syntax; propagate unchanged
        raise
    # bug fix: the original computed the result but never returned it
    return (asg_list_type, next_token)
def terminate_instances(self, user_obj, instance_id, adjust_policy):
    """Terminate an instance by IaaS id via a domain reconfigure.

    We have to find the domain that owns this instance id first; this is
    a messed up part of the aws protocol.  With adjust_policy the
    domain's min/max vm counts are also decremented so the instance is
    not replaced.

    :raises PhantomAWSException: 'InvalidParameterValue' when no domain
        owns the instance id.
    """
    log(
        logging.INFO, "epu_client:terminate_instances %s, adjust %s"
        % (instance_id, adjust_policy))
    try:
        desc_t = self._find_group_by_instance(user_obj, instance_id)
        if desc_t is None:
            raise PhantomAWSException(
                'InvalidParameterValue',
                details=
                "There is no domain associated with that instnace id")
        (name, desc, epu_instance_id) = desc_t
        conf = {'engine_conf': {'terminate': epu_instance_id}}
        if adjust_policy:
            desired_size = desc['config']['engine_conf']['minimum_vms']
            if desired_size < 1:
                log(logging.WARN, "Trying to decrease the size lower than 0")
                desired_size = 0
            else:
                desired_size = desired_size - 1
            log(logging.INFO,
                "decreasing the desired_size to %d" % (desired_size))
            conf['engine_conf']['minimum_vms'] = desired_size
            conf['engine_conf']['maximum_vms'] = desired_size
        log(
            logging.INFO, "calling reconfigure_domain with %s for user %s"
            % (str(conf), user_obj.access_id))
        self._epum_client.reconfigure_domain(
            name, conf, caller=user_obj.access_id)
    except DashiError as de:
        # py3-compatible except syntax
        log(logging.ERROR, "An error altering ASG: %s" % (str(de)))
        raise
def remove_user(self, access_key):
    """Delete the user record matching *access_key* from the session.

    :returns: True once the delete has been issued
    :raises PhantomAWSException: 'InvalidClientTokenId' when the access
        key is unknown.
    """
    record = self._lookup_user(access_key)
    if not record:
        raise PhantomAWSException('InvalidClientTokenId')
    self._session.delete(record)
    return True