def __call__(self, req):
    """Handle a CreateOrUpdateTags request.

    Tags are bucketed by their ResourceId (the autoscale group name) and
    each bucket is converted to a config update for that group.
    """
    user_obj = self.get_user_obj(req)
    log_request(req, user_obj)
    tags_input = CreateOrUpdateTagsInput()
    tags_input.set_from_dict(req.params)
    # first we need to organize the tags by group association
    tag_groups = {}
    for tag in tags_input.Tags:
        tag_groups.setdefault(tag.ResourceId, []).append(tag)
    for group_name, tags in tag_groups.items():
        log(logging.INFO, "Processing tags for the group %s" % (group_name))
        (name, new_conf) = tags_to_definition(tags)
        self._system.alter_autoscale_group(user_obj, group_name, new_conf, force=True)
    res = self.get_response()
    doc = self.get_default_response_body_dom(doc_name="CreateOrUpdateTagsResponse")
    res.unicode_body = doc.documentElement.toprettyxml()
    log_reply(doc, user_obj)
    return res
def delete_autoscale_group(self, user_obj, name, force):
    """Remove the EPUM domain backing the autoscale group *name*.

    ``force`` is accepted for interface compatibility but is not read
    here.  DashiErrors are logged and re-raised.
    """
    try:
        log(logging.INFO,
            "deleting %s for user %s" % (str(name), user_obj.access_id))
        self._epum_client.remove_domain(name, caller=user_obj.access_id)
    except DashiError as de:
        log(logging.ERROR, "An error altering ASG: %s" % (str(de)))
        raise
def __call__(self, req):
    """Handle SetDesiredCapacity: reconfigure the group's desired size."""
    user_obj = self.get_user_obj(req)
    log_request(req, user_obj)
    capacity_input = SetDesiredCapacityInput()
    capacity_input.set_from_dict(req.params)
    # HonorCooldown maps onto the internal "force" flag
    force = bool(capacity_input.HonorCooldown)
    new_conf = {'desired_capacity': capacity_input.DesiredCapacity}
    self._system.alter_autoscale_group(
        user_obj, capacity_input.AutoScalingGroupName, new_conf, force)
    res = self.get_response()
    doc = self.get_default_response_body_dom(doc_name="SetDesiredCapacityResponse")
    res.unicode_body = doc.documentElement.toprettyxml()
    log(logging.INFO, "User %s change %s capacity to %d" % (user_obj.access_id, capacity_input.AutoScalingGroupName, capacity_input.DesiredCapacity))
    log_reply(doc, user_obj)
    return res
def __call__(self, req):
    """CreateOrUpdateTags handler: bucket tags per ResourceId (the group
    name) and push each bucket to its group as new configuration."""
    user_obj = self.get_user_obj(req)
    log_request(req, user_obj)
    tags_input = CreateOrUpdateTagsInput()
    tags_input.set_from_dict(req.params)
    # bucket the incoming tags by the group they belong to
    grouped = {}
    for tag in tags_input.Tags:
        if tag.ResourceId not in grouped:
            grouped[tag.ResourceId] = []
        grouped[tag.ResourceId].append(tag)
    for group_name in grouped:
        log(logging.INFO, "Processing tags for the group %s" % (group_name))
        (definition_name, new_conf) = tags_to_definition(grouped[group_name])
        self._system.alter_autoscale_group(user_obj, group_name, new_conf, force=True)
    res = self.get_response()
    doc = self.get_default_response_body_dom(
        doc_name="CreateOrUpdateTagsResponse")
    res.unicode_body = doc.documentElement.toprettyxml()
    log_reply(doc, user_obj)
    return res
def __init__(self, cfg):
    """Wire up the dashi messaging connection plus EPUM/DTRS clients.

    OpenTSDB sensor settings fall back to module defaults when the config
    tree does not define them.
    """
    system_cfg = cfg.phantom.system
    use_ssl = system_cfg.rabbit_ssl
    self._rabbit = system_cfg.rabbit
    self._rabbit_port = system_cfg.rabbit_port
    self._rabbitpw = system_cfg.rabbit_pw
    self._rabbituser = system_cfg.rabbit_user
    self._rabbitexchange = system_cfg.rabbit_exchange
    # XXXXX deliberately masks the password in the log line
    log(logging.INFO,
        "Connecting to epu messaging fabric: %s, %s, XXXXX, %d, ssl=%s"
        % (self._rabbit, self._rabbituser, self._rabbit_port, str(use_ssl)))
    self._dashi_conn = DashiCeiConnection(
        self._rabbit, self._rabbituser, self._rabbitpw,
        exchange=self._rabbitexchange, timeout=60,
        port=self._rabbit_port, ssl=use_ssl)
    try:
        self._opentsdb_host = cfg.phantom.sensor.opentsdb.host
    except AttributeError:
        self._opentsdb_host = DEFAULT_OPENTSDB_HOST
    try:
        self._opentsdb_port = cfg.phantom.sensor.opentsdb.port
    except AttributeError:
        self._opentsdb_port = DEFAULT_OPENTSDB_PORT
    self._epum_client = EPUMClient(self._dashi_conn)
    self._dtrs_client = DTRSClient(self._dashi_conn)
    load_known_definitions(self._epum_client)
def call(sqlobj, *args, **kwargs):
    """Decorator body: open the DB handle, run the wrapped *func*, and
    translate SQLAlchemy errors into PhantomAWSException('InternalFailure')."""
    sqlobj._open_dbobj()
    try:
        return func(sqlobj, *args, **kwargs)
    except sqlalchemy.exc.SQLAlchemyError as ex:
        log(logging.ERROR,
            "A database error occurred while trying to access the user db %s" % (str(ex)))
        raise PhantomAWSException('InternalFailure', ex)
def terminate_instances(self, user_obj, instance_id, adjust_policy):
    """Terminate one instance by its iaas id.

    AWS's protocol only hands us an instance id, so we first search the
    user's domains for the group that owns it.  When *adjust_policy* is
    true the domain's min/max VM counts are decremented (floored at 0)
    so the engine does not immediately replace the terminated VM.

    Raises PhantomAWSException('InvalidParameterValue') when no domain
    owns the instance; DashiErrors are logged and re-raised.
    """
    log(logging.INFO, "epu_client:terminate_instances %s, adjust %s" % (instance_id, adjust_policy))
    try:
        desc_t = self._find_group_by_instance(user_obj, instance_id)
        if desc_t is None:
            # fixed typo in user-facing message: "instnace" -> "instance"
            raise PhantomAWSException('InvalidParameterValue', details="There is no domain associated with that instance id")
        (name, desc, epu_instance_id) = desc_t
        conf = {'engine_conf': {'terminate': epu_instance_id}}
        if adjust_policy:
            desired_size = desc['config']['engine_conf']['minimum_vms']
            if desired_size < 1:
                log(logging.WARN, "Trying to decrease the size lower than 0")
                desired_size = 0
            else:
                desired_size = desired_size - 1
            log(logging.INFO, "decreasing the desired_size to %d" % (desired_size))
            conf['engine_conf']['minimum_vms'] = desired_size
            conf['engine_conf']['maximum_vms'] = desired_size
        log(logging.INFO, "calling reconfigure_domain with %s for user %s" % (str(conf), user_obj.access_id))
        self._epum_client.reconfigure_domain(name, conf, caller=user_obj.access_id)
    except DashiError as de:
        log(logging.ERROR, "An error altering ASG: %s" % (str(de)))
        raise
def convert_epu_description_to_asg_out(desc, asg):
    """Fill *asg*.Instances from an EPU domain description and return *asg*.

    Each entry of desc['instances'] becomes an InstanceType labelled with
    the domain's name and the group's LaunchConfigurationName.
    """
    name = desc['name']
    log(logging.DEBUG, "Changing the config: %s" % (str(desc['config'])))
    asg.Instances = AWSListType('Instances')
    for inst in desc['instances']:
        log(logging.DEBUG, "Converting instance %s" % (str(inst)))
        out_t = InstanceType('Instance')
        out_t.AutoScalingGroupName = name
        health = _is_healthy(inst['state'])
        if inst.get('state_desc') is not None:
            health = health + " " + str(inst['state_desc'])
        out_t.HealthStatus = health
        out_t.LifecycleState = inst['state']
        out_t.AvailabilityZone = inst['site']
        out_t.LaunchConfigurationName = asg.LaunchConfigurationName
        out_t.InstanceId = inst.get('iaas_id', "")
        asg.Instances.type_list.append(out_t)
    return asg
def get_autoscale_groups(self, user_obj, names=None, max=-1, startToken=None):
    """List the caller's EPU domains as AutoScalingGroup descriptions.

    ``names`` optionally filters the result, ``max`` caps the count
    (-1 means unlimited), and entries are skipped until ``startToken``
    is seen.  Returns (AWSListType, next_token); next_token paging is
    not implemented yet.
    """
    epu_list = self._epum_client.list_domains(caller=user_obj.access_id)
    log(logging.DEBUG, "Incoming epu list is %s" % (str(epu_list)))
    next_token = None
    epu_list.sort()
    asg_list_type = AWSListType('AutoScalingGroups')
    for asg_name in epu_list:
        if asg_list_type.get_length() >= max and max > -1:
            break
        if asg_name == startToken:
            startToken = None
        if startToken is not None:
            continue
        if names is not None and asg_name not in names:
            continue
        asg_description = self._epum_client.describe_domain(
            asg_name, caller=user_obj.access_id)
        asg_list_type.add_item(
            convert_epu_description_to_asg_out(asg_description, asg_name))
    # XXX need to set next_token
    return (asg_list_type, next_token)
def call(sqlobj, *args, **kwargs):
    """Open the DB handle, delegate to the wrapped *func*, and surface
    any SQLAlchemy failure as an AWS 'InternalFailure'."""
    sqlobj._open_dbobj()
    try:
        return func(sqlobj, *args, **kwargs)
    except sqlalchemy.exc.SQLAlchemyError as db_error:
        message = "A database error occurred while trying to access the user db %s" % (str(db_error))
        log(logging.ERROR, message)
        raise PhantomAWSException('InternalFailure', db_error)
def get_user_object_by_display_name(self, display_name):
    """Look up a user record by display name.

    Raises PhantomAWSException('InvalidClientTokenId') for an unknown
    name and PhantomAWSException('InternalFailure') on database errors.
    """
    try:
        query = self._session.query(PhantomUserDBObject).filter(
            PhantomUserDBObject.displayname == display_name)
        db_obj = query.first()
        if not db_obj:
            raise PhantomAWSException('InvalidClientTokenId')
        return PhantomUserObject(db_obj.access_key, db_obj.access_secret,
                                 db_obj.displayname)
    except sqlalchemy.exc.SQLAlchemyError as ex:
        log(logging.ERROR, "A database error occurred while trying to access the user db %s" % (str(ex)))
        raise PhantomAWSException('InternalFailure')
def _is_healthy(state): a = state.split('-') try: code = int(a[0]) if code > 600: return "Unhealthy" else: return "Healthy" except: log(logging.WARN, "A weird state was found %s" % (state)) return "Unhealthy"
def __init__(self, cfg):
    """Initialize the local-DB system plus its dashi/EPUM connection."""
    SystemLocalDB.__init__(self, cfg)
    system_cfg = cfg.phantom.system
    use_ssl = system_cfg.rabbit_ssl
    self._rabbit = system_cfg.rabbit
    self._rabbit_port = system_cfg.rabbit_port
    self._rabbitpw = system_cfg.rabbit_pw
    self._rabbituser = system_cfg.rabbit_user
    self._rabbitexchange = system_cfg.rabbit_exchange
    # password is masked as XXXXX in the log output
    log(logging.INFO,
        "Connecting to epu messaging fabric: %s, %s, XXXXX, %d, ssl=%s"
        % (self._rabbit, self._rabbituser, self._rabbit_port, str(use_ssl)))
    self._dashi_conn = DashiCeiConnection(
        self._rabbit, self._rabbituser, self._rabbitpw,
        exchange=self._rabbitexchange, timeout=60,
        port=self._rabbit_port, ssl=use_ssl)
    self._epum_client = EPUMClient(self._dashi_conn)
def alter_autoscale_group(self, user_obj, name, new_conf, force):
    """Push *new_conf* to EPUM domain *name* as its engine_conf.

    A 'desired_capacity' entry is expanded in place (the caller's dict
    is mutated, as before) into matching minimum_vms/maximum_vms values.
    DashiErrors are logged and re-raised.
    """
    engine_conf = new_conf  # intentionally aliased: mutations are visible to the caller
    conf = {'engine_conf': engine_conf}
    desired = engine_conf.get('desired_capacity')
    if desired is not None:
        engine_conf['minimum_vms'] = desired
        engine_conf['maximum_vms'] = desired
    try:
        if engine_conf:
            self._epum_client.reconfigure_domain(name, conf, caller=user_obj.access_id)
    except DashiError as de:
        log(logging.ERROR, "An error altering ASG: %s" % (str(de)))
        raise
def get_user_object_by_display_name(self, display_name): try: q = self._session.query(PhantomUserDBObject) q = q.filter(PhantomUserDBObject.displayname == display_name) db_obj = q.first() if not db_obj: raise PhantomAWSException('InvalidClientTokenId') return PhantomUserObject(db_obj.access_key, db_obj.access_secret, db_obj.displayname) except sqlalchemy.exc.SQLAlchemyError, ex: log( logging.ERROR, "A database error occurred while trying to access the user db %s" % (str(ex))) raise PhantomAWSException('InternalFailure')
def convert_epu_description_to_asg_out(desc, name):
    """Translate an EPUM domain description into an AutoScalingGroupType.

    *name* labels the converted instances; most group attributes come
    from desc['config']['engine_conf'].  CreatedTime is only set when
    the engine_conf carries one.
    """
    log(logging.DEBUG, "conversion description: %s" % (str(desc)))
    config = desc['config']['engine_conf']
    asg = AutoScalingGroupType('AutoScalingGroup')
    asg.AutoScalingGroupName = desc['name']
    asg.DesiredCapacity = config['minimum_vms']
    tm = _get_key_or_none(config, 'CreatedTime')
    if tm:
        tm = _get_time(config['CreatedTime'])
        asg.CreatedTime = DateTimeType('CreatedTime', tm)
    asg.AutoScalingGroupARN = _get_key_or_none(config, 'AutoScalingGroupARN')
    asg.AvailabilityZones = AWSListType('AvailabilityZones')
    # prefer 'dtname', fall back to 'deployable_type'
    dt_name = config.get('dtname')
    if dt_name is None:
        dt_name = config.get('deployable_type')
    asg.HealthCheckType = _get_key_or_none(config, 'HealthCheckType')
    asg.LaunchConfigurationName = "%s" % (dt_name)
    # fixed dead code: the old "if ... is None" fallbacks re-read the very
    # same key, which was a no-op; one get() per attribute is sufficient
    asg.MaxSize = config.get('maximum_vms')
    asg.MinSize = config.get('minimum_vms')
    asg.PlacementGroup = _get_key_or_none(config, 'PlacementGroup')
    asg.VPCZoneIdentifier = _get_key_or_none(config, 'VPCZoneIdentifier')
    asg.EnabledMetrics = AWSListType('EnabledMetrics')
    asg.HealthCheckGracePeriod = 0
    asg.LoadBalancerNames = AWSListType('LoadBalancerNames')
    asg.SuspendedProcesses = AWSListType('SuspendedProcesses')
    asg.Tags = AWSListType('Tags')
    asg.Cooldown = 0
    asg.Instances = AWSListType('Instances')
    for inst in desc['instances']:
        asg.Instances.type_list.append(convert_instance_type(name, inst))
    return asg
def convert_epu_description_to_asg_out(desc, name):
    """Build an AutoScalingGroupType view of an EPUM domain description."""
    log(logging.DEBUG, "conversion description: %s" % (str(desc)))
    engine_conf = desc['config']['engine_conf']
    group = AutoScalingGroupType('AutoScalingGroup')
    group.AutoScalingGroupName = desc['name']
    group.DesiredCapacity = engine_conf['minimum_vms']
    created = _get_key_or_none(engine_conf, 'CreatedTime')
    if created:
        group.CreatedTime = DateTimeType(
            'CreatedTime', _get_time(engine_conf['CreatedTime']))
    group.AutoScalingGroupARN = _get_key_or_none(engine_conf, 'AutoScalingGroupARN')
    group.AvailabilityZones = AWSListType('AvailabilityZones')
    # 'dtname' wins; 'deployable_type' is the fallback
    dt_name = engine_conf.get('dtname')
    if dt_name is None:
        dt_name = engine_conf.get('deployable_type')
    group.HealthCheckType = _get_key_or_none(engine_conf, 'HealthCheckType')
    group.LaunchConfigurationName = "%s" % (dt_name)
    group.MaxSize = engine_conf.get('maximum_vms')
    group.MinSize = engine_conf.get('minimum_vms')
    group.PlacementGroup = _get_key_or_none(engine_conf, 'PlacementGroup')
    group.VPCZoneIdentifier = _get_key_or_none(engine_conf, 'VPCZoneIdentifier')
    group.EnabledMetrics = AWSListType('EnabledMetrics')
    group.HealthCheckGracePeriod = 0
    group.LoadBalancerNames = AWSListType('LoadBalancerNames')
    group.SuspendedProcesses = AWSListType('SuspendedProcesses')
    group.Tags = AWSListType('Tags')
    group.Cooldown = 0
    group.Instances = AWSListType('Instances')
    for inst in desc['instances']:
        group.Instances.type_list.append(convert_instance_type(name, inst))
    return group
def convert_instance_type(name, inst):
    """Build an AWS InstanceType record from one EPU instance dict."""
    log(logging.DEBUG, "Converting instance %s" % (str(inst)))
    converted = InstanceType('Instance')
    converted.AutoScalingGroupName = name
    status = _is_healthy(inst['state'])
    if inst.get('state_desc') is not None:
        status = status + " " + str(inst['state_desc'])
    converted.HealthStatus = status
    converted.LifecycleState = inst['state']
    converted.AvailabilityZone = inst['site']
    converted.LaunchConfigurationName = inst['deployable_type']
    converted.InstanceId = inst.get('iaas_id', "")
    return converted
def convert_instance_type(name, inst):
    """Convert one EPU instance description into an InstanceType."""
    log(logging.DEBUG, "Converting instance %s" % (str(inst)))
    out_t = InstanceType('Instance')
    out_t.AutoScalingGroupName = name
    out_t.HealthStatus = _is_healthy(inst['state'])
    state_desc = inst.get('state_desc')
    if state_desc is not None:
        # append the extra detail to the health status
        out_t.HealthStatus = "%s %s" % (out_t.HealthStatus, str(state_desc))
    out_t.LifecycleState = inst['state']
    out_t.AvailabilityZone = inst['site']
    out_t.LaunchConfigurationName = inst['deployable_type']
    out_t.InstanceId = inst['iaas_id'] if 'iaas_id' in inst else ""
    return out_t
def get_autoscale_groups(self, user_obj, names=None, max=-1, startToken=None):
    """List groups from the local DB, cross-checked against the EPU.

    BUG FIX: the original computed the group list but never returned it
    (the function fell off the end, yielding None); it now returns
    (asg_list_type, next_token) like the parent implementation it calls.
    The unused ``my_list`` accumulator and the no-op
    ``except Exception: raise`` were removed.
    """
    self._clean_up_db()
    (asg_list_type, next_token) = SystemLocalDB.get_autoscale_groups(
        self, user_obj, names, max, startToken)
    epu_list = self._epum_client.list_domains()
    log(logging.DEBUG, "Incoming epu list is %s" % (str(epu_list)))
    # verify that every DB-known group is also known to the epu
    for grp in asg_list_type.type_list:
        if grp.AutoScalingGroupName not in epu_list:
            # perhaps all we should do here is log the error and remove the
            # item from the DB; for now make it very obvious that this happened
            raise PhantomAWSException('InternalFailure', "%s is in the DB but the epu does not know about it" % (grp.AutoScalingGroupName))
        epu_desc = self._epum_client.describe_domain(grp.AutoScalingGroupName)
        # presumably fills in grp's Instances in place; return value not needed
        convert_epu_description_to_asg_out(epu_desc, grp)
    return (asg_list_type, next_token)
def terminate_instances(self, user_obj, instance_id, adjust_policy):
    """Terminate the VM with iaas id *instance_id*.

    The owning domain is located first (AWS only supplies the instance
    id).  With *adjust_policy* set, the domain's min/max VM counts are
    reduced by one (never below zero) so the engine does not replace the
    VM.  Raises PhantomAWSException when no owning domain exists;
    DashiErrors are logged and re-raised.
    """
    log(
        logging.INFO,
        "epu_client:terminate_instances %s, adjust %s"
        % (instance_id, adjust_policy))
    try:
        desc_t = self._find_group_by_instance(user_obj, instance_id)
        if desc_t is None:
            # typo fixed in user-facing message: "instnace" -> "instance"
            raise PhantomAWSException(
                'InvalidParameterValue',
                details="There is no domain associated with that instance id")
        (name, desc, epu_instance_id) = desc_t
        conf = {'engine_conf': {'terminate': epu_instance_id}}
        if adjust_policy:
            desired_size = desc['config']['engine_conf']['minimum_vms']
            if desired_size < 1:
                log(logging.WARN, "Trying to decrease the size lower than 0")
                desired_size = 0
            else:
                desired_size = desired_size - 1
            log(logging.INFO,
                "decreasing the desired_size to %d" % (desired_size))
            conf['engine_conf']['minimum_vms'] = desired_size
            conf['engine_conf']['maximum_vms'] = desired_size
        log(
            logging.INFO,
            "calling reconfigure_domain with %s for user %s"
            % (str(conf), user_obj.access_id))
        self._epum_client.reconfigure_domain(name, conf, caller=user_obj.access_id)
    except DashiError as de:
        log(logging.ERROR, "An error altering ASG: %s" % (str(de)))
        raise
def create_autoscale_group(self, user_obj, asg):
    """Create an EPU domain for *asg* based on the global add template."""
    self._clean_up_db()
    # the parent class persists the group and hands back the DB records
    (db_asg, db_lc) = self._create_autoscale_group(user_obj, asg)
    global g_add_template
    conf = g_add_template.copy()
    engine = conf['engine_conf']
    engine['preserve_n'] = asg.DesiredCapacity
    engine['epuworker_image_id'] = db_lc.ImageId
    engine['epuworker_allocation'] = db_lc.InstanceType
    engine['iaas_key'] = user_obj.access_id
    engine['iaas_secret'] = user_obj.secret_key
    engine['iaas_site'] = db_asg.AvailabilityZones + "-" + user_obj.access_id
    engine['iaas_allocation'] = db_lc.InstanceType
    log(logging.INFO, "Creating autoscale group with %s" % (conf))
    # the original wrapped this in "except Exception: raise", a no-op
    self._epum_client.add_domain(asg.AutoScalingGroupName, conf)
def __call__(self, req):
    """SetDesiredCapacity handler: push the requested size to the group."""
    user_obj = self.get_user_obj(req)
    log_request(req, user_obj)
    params = SetDesiredCapacityInput()
    params.set_from_dict(req.params)
    # HonorCooldown becomes the internal "force" flag
    force = True if params.HonorCooldown else False
    self._system.alter_autoscale_group(
        user_obj, params.AutoScalingGroupName,
        {'desired_capacity': params.DesiredCapacity}, force)
    res = self.get_response()
    doc = self.get_default_response_body_dom(doc_name="SetDesiredCapacityResponse")
    res.unicode_body = doc.documentElement.toprettyxml()
    log(logging.INFO,
        "User %s change %s capacity to %d"
        % (user_obj.access_id, params.AutoScalingGroupName, params.DesiredCapacity))
    log_reply(doc, user_obj)
    return res
def __call__(self, req):
    """Main router entry point: authenticate the request, resolve its
    Action parameter to a handler class, and instantiate the handler.

    NOTE(review): as shown here the success path never returns ``app`` —
    this looks like a truncated view of a longer function (a sibling chunk
    of this file adds a finally block and ``return app``); confirm against
    the full file before relying on this block in isolation.
    """
    before = time.time()  # request start time; presumably for timing stats emitted elsewhere
    user_obj = None
    request_id = str(uuid.uuid4())  # correlates all log lines of one request
    try:
        log(logging.INFO, "%s Enter main router | %s" % (request_id, str(req.params)))
        authz = self._cfg.get_authz()
        access_dict = get_aws_access_key(req)
        user_obj = authz.get_user_object_by_access_id(access_dict['AWSAccessKeyId'])
        authenticate_user(user_obj.secret_key, req, access_dict)
        key = 'Action'
        if key not in req.params.keys():
            raise PhantomAWSException('InvalidParameterValue')
        action = req.params['Action']
        global _action_to_application_map
        if action not in _action_to_application_map:
            # unknown action -> 404 rather than an AWS-style error
            raise webob.exc.HTTPNotFound("No action %s" % action)
        app_cls = _action_to_application_map[action]
        log(logging.INFO, "%s Getting phantom action %s" % (request_id, action))
        app = app_cls(action, cfg=self._cfg)
    except Exception, ex:
        # log with the request id so the failure can be traced, then re-raise
        log(logging.ERROR, "%s Exiting main router with error %s" % (request_id, str(ex)))
        raise
def get_autoscale_groups(self, user_obj, names=None, max=-1, startToken=None):
    """Describe the caller's EPU domains as AutoScalingGroups.

    Entries are skipped until *startToken* is reached, filtered by
    *names* when given, and capped at *max* results (-1 == no limit).
    The second tuple element (next_token) is always None for now.
    """
    domain_names = self._epum_client.list_domains(caller=user_obj.access_id)
    log(logging.DEBUG, "Incoming epu list is %s" % (str(domain_names)))
    next_token = None
    domain_names.sort()
    groups = AWSListType('AutoScalingGroups')
    skip_until = startToken
    for domain in domain_names:
        if groups.get_length() >= max and max > -1:
            break
        if domain == skip_until:
            skip_until = None
        if skip_until is None and (names is None or domain in names):
            description = self._epum_client.describe_domain(
                domain, caller=user_obj.access_id)
            groups.add_item(convert_epu_description_to_asg_out(description, domain))
    # XXX need to set next_token
    return (groups, next_token)
def __init__(self, cfg):
    """Connect to the EPU messaging fabric and create EPUM/DTRS clients.

    Missing cfg.phantom.sensor.opentsdb settings fall back to the module
    defaults.
    """
    ssl = cfg.phantom.system.rabbit_ssl
    self._rabbit = cfg.phantom.system.rabbit
    self._rabbit_port = cfg.phantom.system.rabbit_port
    self._rabbitpw = cfg.phantom.system.rabbit_pw
    self._rabbituser = cfg.phantom.system.rabbit_user
    self._rabbitexchange = cfg.phantom.system.rabbit_exchange
    # XXXXX keeps the password out of the logs
    connect_msg = "Connecting to epu messaging fabric: %s, %s, XXXXX, %d, ssl=%s" % (
        self._rabbit, self._rabbituser, self._rabbit_port, str(ssl))
    log(logging.INFO, connect_msg)
    self._dashi_conn = DashiCeiConnection(
        self._rabbit, self._rabbituser, self._rabbitpw,
        exchange=self._rabbitexchange, timeout=60,
        port=self._rabbit_port, ssl=ssl)
    try:
        self._opentsdb_host = cfg.phantom.sensor.opentsdb.host
    except AttributeError:
        self._opentsdb_host = DEFAULT_OPENTSDB_HOST
    try:
        self._opentsdb_port = cfg.phantom.sensor.opentsdb.port
    except AttributeError:
        self._opentsdb_port = DEFAULT_OPENTSDB_PORT
    self._epum_client = EPUMClient(self._dashi_conn)
    self._dtrs_client = DTRSClient(self._dashi_conn)
    load_known_definitions(self._epum_client)
def _clean_up_db(self):
    """Best-effort sweep: delete DB autoscale groups whose EPU domain no
    longer exists.  Errors are logged, never raised."""
    try:
        known_domains = self._epum_client.list_domains()
        for asg in self._db.get_asgs(None):
            if asg.AutoScalingGroupName in known_domains:
                continue
            log(logging.ERROR, "Cleaning up an ASG that is in the database and not in the epu list: %s" % (asg.AutoScalingGroupName))
            self._db.delete_asg(asg)
            self._db.db_commit()
            log(logging.INFO, "Object %s has been deleted" % (str(asg)))
    except Exception as ex:
        # deliberately broad: a failed cleanup must not break the caller
        log(logging.ERROR, "An error occurred while attempting to clean up the DB : %s" % (str(ex)))
def create_autoscale_group(self, user_obj, asg):
    """Create an EPUM domain for the autoscale group described by *asg*.

    The group's tags select the domain definition; a launch configuration
    name of the form "dt@site" is split into deployable type and site.
    Raises PhantomAWSException when the group name already exists; other
    DashiErrors are logged and re-raised.
    """
    log(logging.DEBUG, "entering create_autoscale_group with %s" % (asg.LaunchConfigurationName))
    (definition_name, domain_opts) = tags_to_definition(asg.Tags.type_list)
    # NOTE(review): asg.DesiredCapacity used to be stored in minimum_vms
    # here but was unconditionally overwritten by asg.MinSize below, so
    # that dead store has been dropped.
    dt_name = asg.LaunchConfigurationName
    site_name = ""
    if dt_name.find('@') > 0:
        (dt_name, site_name) = _breakup_name(asg.LaunchConfigurationName)
    # settings common to every definition
    domain_opts['dtname'] = dt_name
    domain_opts['minimum_vms'] = asg.MinSize
    domain_opts['maximum_vms'] = asg.MaxSize
    domain_opts['opentsdb_host'] = self._opentsdb_host
    domain_opts['opentsdb_port'] = self._opentsdb_port
    if definition_name == 'sensor_engine':
        # sensor engines additionally need an explicit deployable type and site
        domain_opts['deployable_type'] = dt_name
        domain_opts['iaas_site'] = site_name
        domain_opts['iaas_allocation'] = "m1.small"
    domain_opts['CreatedTime'] = make_time(asg.CreatedTime.date_time)
    domain_opts['AutoScalingGroupARN'] = asg.AutoScalingGroupARN
    domain_opts['VPCZoneIdentifier'] = asg.VPCZoneIdentifier
    domain_opts['HealthCheckType'] = asg.HealthCheckType
    domain_opts['PlacementGroup'] = asg.PlacementGroup
    conf = {'engine_conf': domain_opts}
    log(logging.INFO, "Creating autoscale group with %s" % (conf))
    try:
        self._epum_client.add_domain(asg.AutoScalingGroupName, definition_name, conf, caller=user_obj.access_id)
    except DashiError as de:
        if de.exc_type == u'WriteConflictError':
            raise PhantomAWSException(
                'InvalidParameterValue',
                details="auto scale name already exists")
        log(logging.ERROR, "An error creating ASG: %s" % (str(de)))
        raise
def create_autoscale_group(self, user_obj, asg):
    """Register a new EPUM domain for autoscale group *asg*.

    Tags pick the domain definition; "dt@site" launch configuration
    names are split into deployable type and site.  A name collision
    surfaces as PhantomAWSException('InvalidParameterValue').
    """
    log(logging.DEBUG, "entering create_autoscale_group with %s" % (asg.LaunchConfigurationName))
    (definition, opts) = tags_to_definition(asg.Tags.type_list)
    opts['minimum_vms'] = asg.DesiredCapacity  # NOTE(review): overwritten below; kept for parity with the original
    deployable = asg.LaunchConfigurationName
    site = ""
    if deployable.find('@') > 0:
        (deployable, site) = _breakup_name(asg.LaunchConfigurationName)
    if definition == 'sensor_engine':
        # sensor engines carry an explicit deployable type and site
        opts['deployable_type'] = deployable
        opts['iaas_site'] = site
        opts['iaas_allocation'] = "m1.small"
    opts['dtname'] = deployable
    opts['minimum_vms'] = asg.MinSize
    opts['maximum_vms'] = asg.MaxSize
    opts['opentsdb_host'] = self._opentsdb_host
    opts['opentsdb_port'] = self._opentsdb_port
    opts['CreatedTime'] = make_time(asg.CreatedTime.date_time)
    opts['AutoScalingGroupARN'] = asg.AutoScalingGroupARN
    opts['VPCZoneIdentifier'] = asg.VPCZoneIdentifier
    opts['HealthCheckType'] = asg.HealthCheckType
    opts['PlacementGroup'] = asg.PlacementGroup
    conf = {'engine_conf': opts}
    log(logging.INFO, "Creating autoscale group with %s" % (conf))
    try:
        self._epum_client.add_domain(asg.AutoScalingGroupName, definition, conf, caller=user_obj.access_id)
    except DashiError as de:
        if de.exc_type == u'WriteConflictError':
            raise PhantomAWSException('InvalidParameterValue', details="auto scale name already exists")
        log(logging.ERROR, "An error creating ASG: %s" % (str(de)))
        raise
action = req.params['Action'] global _action_to_application_map if action not in _action_to_application_map: raise webob.exc.HTTPNotFound("No action %s" % action) app_cls = _action_to_application_map[action] log(logging.INFO, "%s Getting phantom action %s" % (request_id, action)) app = app_cls(action, cfg=self._cfg) except Exception, ex: log(logging.ERROR, "%s Exiting main router with error %s" % (request_id, str(ex))) raise finally: #if user_obj: # user_obj.close() after = time.time() if self._cfg.statsd_client is not None: self._cfg.statsd_client.incr('autoscale.MainRouter.count') self._cfg.statsd_client.timing('autoscale.MainRouter.timing', (after - before) * 1000) pass log(logging.INFO, "%s Exiting main router" % (request_id)) return app if __name__ == '__main__': httpd = make_server('127.0.0.1', 8445, MainRouter()) httpd.serve_forever()