def main():
    CONF = cfg.CONF

    cue_service.prepare_service(sys.argv)

    # Log configuration and other startup information
    LOG = log.getLogger(__name__)
    LOG.info(_LI("Starting cue-monitor"))
    LOG.info(_LI("Configuration:"))
    CONF.log_opt_values(LOG, logging.INFO)

    monitor = cue_monitor_service.MonitorService()
    launcher = openstack_service.launch(CONF, monitor)
    launcher.wait()
def main():
    # Initialize environment
    CONF = cfg.CONF
    cue_service.prepare_service(sys.argv)

    # Log configuration and other startup information
    LOG = log.getLogger(__name__)
    LOG.info(_LI("Starting cue workers"))
    LOG.info(_LI("Configuration:"))
    CONF.log_opt_values(LOG, logging.INFO)

    cue_worker = tf_service.ConductorService.create("cue-worker")
    cue_worker.handle_signals()
    cue_worker.start()
def validate_token(auth_type, token):
    auth_validator = None
    if auth_type and auth_type.upper() == PLAIN_AUTH:
        auth_validator = PlainAuthTokenValidator(token=token)
    elif not auth_type:
        return AuthTokenValidator()
    else:
        LOG.info(_LI('Invalid authentication type: %s') % auth_type)
    return auth_validator
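# Usage sketch (hypothetical values, not part of the original module):
# validate_token() hands back a PlainAuthTokenValidator for PLAIN
# credentials, a permissive base AuthTokenValidator when no auth_type is
# supplied, and None for an unrecognized type, so callers must check for
# None before calling validate().
def _example_validate_plain_token():
    validator = validate_token(
        auth_type='PLAIN',
        token={'username': 'rabbit_user', 'password': 'rabbit_pass'})
    return validator is not None and validator.validate()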
def main():
    # Parse config file and command line options, then start logging
    cue_service.prepare_service(sys.argv)

    # Build and start the WSGI app
    host = CONF.api.host_ip
    port = CONF.api.port
    wsgi = simple_server.make_server(
        host, port,
        app.VersionSelectorApplication(),
        server_class=ThreadedSimpleServer)

    LOG = log.getLogger(__name__)
    LOG.info(_LI("Serving on http://%(host)s:%(port)s"),
             {'host': host, 'port': port})
    LOG.info(_LI("Configuration:"))
    CONF.log_opt_values(LOG, logging.INFO)

    try:
        wsgi.serve_forever()
    except KeyboardInterrupt:  # pragma: no cover
        pass
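# ThreadedSimpleServer is referenced above but not defined in this snippet.
# A minimal sketch of the conventional definition (an assumption): mix the
# stdlib threading mix-in into the reference WSGI server so each request is
# handled in its own thread instead of serializing all requests.
class ThreadedSimpleServer(socketserver.ThreadingMixIn,
                           simple_server.WSGIServer):
    pass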
def validate(self):
    valid_username = False
    valid_password = False

    if self.token:
        if 'username' in self.token:
            if (self.token['username'] and
                    (len(self.token['username']) >= MIN_USERNAME_LENGTH) and
                    (len(self.token['username']) <= MAX_USERNAME_LENGTH)):
                valid_username = True
            else:
                LOG.info(_LI('Invalid username: %s')
                         % self.token['username'])

        if 'password' in self.token:
            if (self.token['password'] and
                    (len(self.token['password']) >= MIN_PASSWORD_LENGTH) and
                    (len(self.token['password']) <= MAX_PASSWORD_LENGTH)):
                valid_password = True
            else:
                LOG.info(_LI('Invalid password'))

    return valid_username and valid_password
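# Boundary sketch for the checks above (the MIN_/MAX_ length constants are
# module-level; their concrete values are not shown in this snippet): both
# fields must be present, non-empty, and within bounds for validate() to pass.
#
#     PlainAuthTokenValidator(
#         token={'username': 'u' * MIN_USERNAME_LENGTH,
#                'password': 'p' * MIN_PASSWORD_LENGTH}).validate()  # True
#     PlainAuthTokenValidator(token={'username': ''}).validate()     # False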
def init(default_rule=None):
    # ConfigOpts.find_file() returns a single path or None (not a list), so
    # guard with a truthiness check rather than len()
    policy_file = cfg.CONF.find_file(cfg.CONF.policy_file)
    if not policy_file:
        msg = 'Unable to determine appropriate policy json file'
        raise exception.ConfigurationError(msg)

    LOG.info(_LI('Using policy_file found at: %s') % policy_file)
    with open(policy_file) as fh:
        policy_string = fh.read()
    rules = policy.Rules.load_json(policy_string, default_rule=default_rule)

    global _ENFORCER
    if not _ENFORCER:
        LOG.debug("Enforcer is not present, recreating.")
        _ENFORCER = policy.Enforcer()
    _ENFORCER.set_rules(rules)
def init(default_rule=None):
    policy_files = utils.find_config(CONF['oslo_policy'].policy_file)
    if len(policy_files) == 0:
        msg = 'Unable to determine appropriate policy json file'
        raise exception.ConfigurationError(msg)

    LOG.info(_LI('Using policy_file found at: %s') % policy_files[0])
    with open(policy_files[0]) as fh:
        policy_string = fh.read()
    rules = policy.Rules.load_json(policy_string, default_rule=default_rule)

    global _ENFORCER
    if not _ENFORCER:
        LOG.debug("Enforcer is not present, recreating.")
        _ENFORCER = policy.Enforcer(cfg.CONF)
    _ENFORCER.set_rules(rules)
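# Sketch of how the module-level _ENFORCER is typically consumed. The check()
# wrapper below is an assumption modeled on call sites elsewhere in this
# section, e.g. policy.check("cluster:delete", context, target); the exception
# class handed to enforce() is likewise an assumption.
def check(rule, ctxt, target=None, do_raise=True):
    init()  # lazily (re)load the rules before enforcing
    credentials = ctxt.to_dict()
    return _ENFORCER.enforce(rule, target or {}, credentials,
                             do_raise=do_raise,
                             exc=exception.NotAuthorized)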
def delete_complete_cluster(context, cluster_id):
    cluster_obj = objects.Cluster.get_cluster_by_id(context, cluster_id)

    target = {'tenant_id': cluster_obj.project_id}
    policy.check("cluster:delete", context, target)

    # update cluster to deleting
    objects.Cluster.update_cluster_deleting(context, cluster_id)

    # retrieve cluster nodes
    nodes = objects.Node.get_nodes_by_cluster_id(context, cluster_id)

    # create list with node id's for delete cluster flow
    node_ids = [node.id for node in nodes]

    # retrieve cluster record
    cluster = objects.Cluster.get_cluster_by_id(context, cluster_id)

    # prepare and post cluster delete job to backend
    flow_kwargs = {
        'cluster_id': cluster_id,
        'node_ids': node_ids,
        'group_id': cluster.group_id,
    }

    job_args = {
        'context': context.to_dict(),
    }

    job_client = task_flow_client.get_client_instance()
    # TODO(dagnello): might be better to use request_id for job_uuid
    job_uuid = uuidutils.generate_uuid()
    job_client.post(delete_cluster, job_args, flow_kwargs=flow_kwargs,
                    tx_uuid=job_uuid)

    LOG.info(_LI('Delete Cluster Request Cluster ID %(cluster_id)s Job ID '
                 '%(job_id)s') % {"cluster_id": cluster_id,
                                  "job_id": job_uuid})
def post(self, data):
    """Create a new Cluster.

    :param data: cluster parameters within the request body.
    """
    context = pecan.request.context
    request_data = data.as_dict()
    cluster_flavor = request_data['flavor']

    if data.size <= 0:
        raise exception.Invalid(_("Invalid cluster size provided"))
    elif data.size > CONF.api.max_cluster_size:
        raise exception.RequestEntityTooLarge(
            _("Invalid cluster size, max size is: %d")
            % CONF.api.max_cluster_size)

    if len(data.network_id) > 1:
        raise exception.Invalid(_("Invalid number of network_id's"))

    # extract username/password
    if (data.authentication and data.authentication.type and
            data.authentication.token):
        auth_validator = auth_validate.AuthTokenValidator.validate_token(
            auth_type=data.authentication.type,
            token=data.authentication.token)
        if not auth_validator or not auth_validator.validate():
            raise exception.Invalid(_("Invalid broker authentication "
                                      "parameter(s)"))
    else:
        raise exception.Invalid(_("Missing broker authentication "
                                  "parameter(s)"))

    default_rabbit_user = data.authentication.token['username']
    default_rabbit_pass = data.authentication.token['password']

    broker_name = CONF.default_broker_name

    # get the image id of default broker
    image_id = objects.BrokerMetadata.get_image_id_by_broker_name(
        context, broker_name)

    # validate cluster flavor
    self._validate_flavor(image_id, cluster_flavor)

    # convert 'network_id' from list to string type for objects/cluster
    # compatibility
    request_data['network_id'] = request_data['network_id'][0]

    # create new cluster object with required data from user
    new_cluster = objects.Cluster(**request_data)

    # create new cluster with node related data from user
    new_cluster.create(context)

    # retrieve cluster data
    cluster = get_complete_cluster(context, new_cluster.id)

    nodes = objects.Node.get_nodes_by_cluster_id(context, cluster.id)

    # create list with node id's for create cluster flow
    node_ids = [node.id for node in nodes]

    # prepare and post cluster create job to backend
    flow_kwargs = {
        'cluster_id': cluster.id,
        'node_ids': node_ids,
        'user_network_id': cluster.network_id[0],
        'management_network_id': CONF.management_network_id,
    }

    # generate unique erlang cookie to be used by all nodes in the new
    # cluster, erlang cookies are strings of up to 255 characters
    erlang_cookie = uuidutils.generate_uuid()

    # note: 'port' is set once from configuration; a duplicate hard-coded
    # '5672' entry would be silently overridden by the later key
    job_args = {
        'tenant_id': new_cluster.project_id,
        'flavor': cluster.flavor,
        'image': image_id,
        'volume_size': cluster.volume_size,
        'context': context.to_dict(),
        # TODO(sputnik13): this needs to come from the create request
        # and default to a configuration value rather than always using
        # config value
        'security_groups': [CONF.os_security_group],
        'port': CONF.rabbit_port,
        'key_name': CONF.openstack.os_key_name,
        'erlang_cookie': erlang_cookie,
        'default_rabbit_user': default_rabbit_user,
        'default_rabbit_pass': default_rabbit_pass,
    }
    job_client = task_flow_client.get_client_instance()
    # TODO(dagnello): might be better to use request_id for job_uuid
    job_uuid = uuidutils.generate_uuid()
    job_client.post(create_cluster, job_args, flow_kwargs=flow_kwargs,
                    tx_uuid=job_uuid)

    LOG.info(_LI('Create Cluster Request Cluster ID %(cluster_id)s '
                 'Cluster size %(size)s network ID %(network_id)s '
                 'Job ID %(job_id)s Broker name %(broker_name)s')
             % {"cluster_id": cluster.id,
                "size": cluster.size,
                "network_id": cluster.network_id,
                "job_id": job_uuid,
                "broker_name": broker_name})

    cluster.additional_information = []
    cluster.additional_information.append(
        dict(def_rabbit_user=default_rabbit_user))
    cluster.additional_information.append(
        dict(def_rabbit_pass=default_rabbit_pass))

    cluster.unset_empty_fields()
    return cluster
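# Example request body accepted by the handler above (field names are taken
# from the code; "name" and the concrete values are assumptions, and the
# authoritative schema lives in the v1 cluster API model):
#
#     {
#         "name": "rabbit-cluster",
#         "size": 3,
#         "flavor": "8795",
#         "volume_size": 100,
#         "network_id": ["<tenant-network-uuid>"],
#         "authentication": {"type": "PLAIN",
#                            "token": {"username": "rabbit_user",
#                                      "password": "rabbit_pass"}}
#     }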