def _on_config_DELETE_SQL_CONNECTION_POOL(self, params):
    """ Deletes the SQL connection factory.
    """
    # Do not allow any reads or updates while the pools are being updated.
    self.is_get_allowed.clear()
    try:
        pool_name = params["pool_name"]

        new_pool_list = copy.deepcopy(self.pool_list)
        del new_pool_list[pool_name]

        # First save the changes on-disk..
        if not self.create_sa_engines:
            self.config_repo_manager.update_sql_pool_list(new_pool_list)

        # .. close the pool..
        if self.create_sa_engines:
            self.engines[pool_name].dispose()

        # .. and update the list of pools and engines available.
        del self.engines[pool_name]
        self.pool_list = new_pool_list

    except Exception, e:
        msg = "Could not delete the SQL connection pool, params=[%s], e=[%s]" % (pprint(params), format_exc())
        self.logger.error(msg)
        raise ZatoException(msg)
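# A minimal sketch of the config message the delete handler above consumes.
# Only the 'pool_name' key is read; the value shown is a hypothetical example.
example_delete_params = {"pool_name": "crm"}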
def _update(self, top_level_elem, elems, location, before_commit_msg, commit_msg, after_commit_msg):
    """ A common utility method for updating a YAML file in the server's Bazaar repo.
    """
    # TODO: The commit message could be a tad smarter and might include some
    # hints as to what gets committed to the repo.

    # TODO: Move it elsewhere, to a separate 'init' method (or investigate
    # why creating the 'tree' object in 'ensure_repo_consistency' is not
    # enough - possibly because we're using subprocesses and our own process
    # is not the same one that ensure_repo_consistency has been called in).
    if not hasattr(self, "tree"):
        self.tree = WorkingTree.open(self.repo_location)

    data = {}
    data[top_level_elem] = elems
    data_pprinted = pprint(data)
    output = dump(data, Dumper=Dumper, default_flow_style=False)

    logger.debug(before_commit_msg)
    logger.debug("data_pprinted=[%s], output=[%s], location=[%s]" % (data_pprinted, output, location))

    open(location, "w").write(output)
    self.tree.commit(commit_msg)

    logger.debug(after_commit_msg)
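# A hypothetical usage sketch for the _update helper above; 'self' is assumed
# to be the repo-manager instance that defines _update. The element name,
# file path and log messages below are illustrative only.
def update_example_list(self, example_list):
    self._update("example_list", example_list,
        "/path/to/repo/config/example-list.yml",   # hypothetical location
        "About to commit the example list.",       # before_commit_msg
        "Updated the example list.",               # commit_msg
        "Committed the example list.")             # after_commit_msg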
def handle(self, *args, **kwargs):
    payload = kwargs.get("payload")
    request_params = ["pool_name", "engine", "user", "host", "db_name", "pool_size"]
    new_params = _get_params(payload, request_params, "pool.")
    new_params["extra"] = _parse_extra_params(payload)

    new_params_pprinted = pprint(new_params)
    self.logger.debug("Will create an SQL connection pool, new_params=[%s]" % new_params_pprinted)

    result, response = self.server.send_config_request("CREATE_SQL_CONNECTION_POOL", new_params, timeout=6.0)
    self.logger.log(TRACE1, "result=[%s], response=[%s]" % (result, response))

    return result, response
def index(req):
    try:
        jobs = []

        # Build a list of schedulers for a given Zato cluster.
        if req.zato.cluster_id and req.method == 'GET':

            # We have a server to pick the schedulers from, try to invoke it now.
            response = req.zato.client.invoke(
                'zato.scheduler.job.get-list', {'cluster_id': req.zato.cluster_id})

            if response.has_data:
                for job_elem in response.data:

                    id = job_elem.id
                    name = job_elem.name
                    is_active = job_elem.is_active
                    job_type = job_elem.job_type
                    start_date = job_elem.start_date
                    service_name = job_elem.service_name
                    extra = job_elem.extra
                    job_type_friendly = job_type_friendly_names[job_type]

                    job = Job(id, name, is_active, job_type,
                        from_utc_to_user(start_date + '+00:00', req.zato.user_profile),
                        extra, service_name=service_name,
                        job_type_friendly=job_type_friendly)

                    if job_type == SCHEDULER.JOB_TYPE.ONE_TIME:
                        definition_text = _one_time_job_def(req.zato.user_profile, start_date)

                    elif job_type == SCHEDULER.JOB_TYPE.INTERVAL_BASED:
                        definition_text = _interval_based_job_def(req.zato.user_profile,
                            _get_start_date(job_elem.start_date),
                            job_elem.repeats, job_elem.weeks, job_elem.days,
                            job_elem.hours, job_elem.minutes, job_elem.seconds)

                        weeks = job_elem.weeks or ''
                        days = job_elem.days or ''
                        hours = job_elem.hours or ''
                        minutes = job_elem.minutes or ''
                        seconds = job_elem.seconds or ''
                        repeats = job_elem.repeats or ''

                        ib_job = IntervalBasedJob(None, None, weeks, days, hours,
                            minutes, seconds, repeats)
                        job.interval_based = ib_job

                    elif job_type == SCHEDULER.JOB_TYPE.CRON_STYLE:
                        cron_definition = job_elem.cron_definition or ''
                        definition_text = _cron_style_job_def(req.zato.user_profile,
                            start_date, cron_definition)

                        cs_job = CronStyleJob(None, None, cron_definition)
                        job.cron_style = cs_job

                    else:
                        msg = 'Unrecognized job type, name:[{0}], type:[{1}]'.format(name, job_type)
                        logger.error(msg)
                        raise ZatoException(msg)

                    job.definition_text = definition_text
                    jobs.append(job)
            else:
                logger.info('No jobs found, response:[{}]'.format(response))

        if req.method == 'POST':

            action = req.POST.get('zato_action', '')
            if not action:
                msg = 'req.POST contains no [zato_action] parameter.'
                logger.error(msg)
                return HttpResponseServerError(msg)

            job_type = req.POST.get('job_type', '')
            if action != 'execute' and not job_type:
                msg = 'req.POST contains no [job_type] parameter.'
                logger.error(msg)
                return HttpResponseServerError(msg)

            job_name = req.POST['{0}-{1}-name'.format(action, job_type)]

            # Try to match the action and a job type with an action handler..
            handler_name = '_' + action
            if action != 'execute':
                handler_name += '_' + job_type

            handler = globals().get(handler_name)
            if not handler:
                msg = ('No handler found for action [{0}], job_type:[{1}], '
                    'req.POST:[{2}], req.GET:[{3}].'.format(action, job_type,
                        pprint(req.POST), pprint(req.GET)))
                logger.error(msg)
                return HttpResponseServerError(msg)

            # .. invoke the action handler.
            try:
                response = handler(req.zato.client, req.zato.user_profile, req.zato.cluster, req.POST)
                response = response if response else ''
                if response:
                    response['message'] = _get_success_message(action, job_type, job_name)
                    response = dumps(response)
                return HttpResponse(response, content_type='application/javascript')
            except Exception, e:
                msg = ('Could not invoke action [%s], job_type:[%s], e:[%s], '
                    'req.POST:[%s], req.GET:[%s]') % (action, job_type, format_exc(),
                        pprint(req.POST), pprint(req.GET))
                logger.error(msg)
                return HttpResponseServerError(msg)

        return_data = {
            'zato_clusters': req.zato.clusters,
            'cluster_id': req.zato.cluster_id,
            'choose_cluster_form': req.zato.choose_cluster_form,
            'jobs': jobs,
            'friendly_names': job_type_friendly_names.items(),
            'create_one_time_form': OneTimeSchedulerJobForm(create_one_time_prefix, req),
            'create_interval_based_form': IntervalBasedSchedulerJobForm(create_interval_based_prefix, req),
            'create_cron_style_form': CronStyleSchedulerJobForm(create_cron_style_prefix, req),
            'edit_one_time_form': OneTimeSchedulerJobForm(edit_one_time_prefix, req),
            'edit_interval_based_form': IntervalBasedSchedulerJobForm(edit_interval_based_prefix, req),
            'edit_cron_style_form': CronStyleSchedulerJobForm(edit_cron_style_prefix, req),
            'sample_dt': get_sample_dt(req.zato.user_profile),
        }
        return_data.update(get_js_dt_format(req.zato.user_profile))

        return TemplateResponse(req, 'zato/scheduler.html', return_data)
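# A small sketch of the handler-name convention the view above relies on;
# the action and job_type values shown below are hypothetical examples.
def _handler_name(action, job_type):
    name = '_' + action
    if action != 'execute':
        name += '_' + job_type
    return name

# _handler_name('create', 'interval_based') -> '_create_interval_based'
# _handler_name('edit', 'cron_style')       -> '_edit_cron_style'
# _handler_name('execute', '')              -> '_execute'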
def index(req):
    try:
        jobs = []

        # Build a list of schedulers for a given Zato cluster.
        if req.zato.cluster_id and req.method == 'GET':

            # We have a server to pick the schedulers from, try to invoke it now.
            response = req.zato.client.invoke('zato.scheduler.job.get-list',
                {'cluster_id': req.zato.cluster_id})

            if response.has_data:
                for job_elem in response.data:

                    id = job_elem.id
                    name = job_elem.name
                    is_active = job_elem.is_active
                    job_type = job_elem.job_type
                    start_date = job_elem.start_date
                    service_name = job_elem.service_name
                    extra = job_elem.extra
                    job_type_friendly = job_type_friendly_names[job_type]

                    job = Job(id, name, is_active, job_type,
                        from_utc_to_user(start_date + '+00:00', req.zato.user_profile),
                        extra, service_name=service_name,
                        job_type_friendly=job_type_friendly)

                    if job_type == SCHEDULER.JOB_TYPE.ONE_TIME:
                        definition_text = _one_time_job_def(req.zato.user_profile, start_date)

                    elif job_type == SCHEDULER.JOB_TYPE.INTERVAL_BASED:
                        definition_text = _interval_based_job_def(req.zato.user_profile,
                            _get_start_date(job_elem.start_date),
                            job_elem.repeats, job_elem.weeks, job_elem.days,
                            job_elem.hours, job_elem.minutes, job_elem.seconds)

                        weeks = job_elem.weeks or ''
                        days = job_elem.days or ''
                        hours = job_elem.hours or ''
                        minutes = job_elem.minutes or ''
                        seconds = job_elem.seconds or ''
                        repeats = job_elem.repeats or ''

                        ib_job = IntervalBasedJob(None, None, weeks, days, hours,
                            minutes, seconds, repeats)
                        job.interval_based = ib_job

                    elif job_type == SCHEDULER.JOB_TYPE.CRON_STYLE:
                        cron_definition = job_elem.cron_definition or ''
                        definition_text = _cron_style_job_def(req.zato.user_profile,
                            start_date, cron_definition)

                        cs_job = CronStyleJob(None, None, cron_definition)
                        job.cron_style = cs_job

                    else:
                        msg = 'Unrecognized job type, name:[{0}], type:[{1}]'.format(name, job_type)
                        logger.error(msg)
                        raise ZatoException(msg)

                    job.definition_text = definition_text
                    jobs.append(job)
            else:
                logger.info('No jobs found, response:[{}]'.format(response))

        if req.method == 'POST':

            action = req.POST.get('zato_action', '')
            if not action:
                msg = 'req.POST contains no [zato_action] parameter.'
                logger.error(msg)
                return HttpResponseServerError(msg)

            job_type = req.POST.get('job_type', '')
            if action != 'execute' and not job_type:
                msg = 'req.POST contains no [job_type] parameter.'
                logger.error(msg)
                return HttpResponseServerError(msg)

            job_name = req.POST['{0}-{1}-name'.format(action, job_type)]

            # Try to match the action and a job type with an action handler..
            handler_name = '_' + action
            if action != 'execute':
                handler_name += '_' + job_type

            handler = globals().get(handler_name)
            if not handler:
                msg = ('No handler found for action [{0}], job_type:[{1}], '
                    'req.POST:[{2}], req.GET:[{3}].'.format(action, job_type,
                        pprint(req.POST), pprint(req.GET)))
                logger.error(msg)
                return HttpResponseServerError(msg)

            # .. invoke the action handler.
            try:
                response = handler(req.zato.client, req.zato.user_profile, req.zato.cluster, req.POST)
                response = response if response else ''
                if response:
                    response['message'] = _get_success_message(action, job_type, job_name)
                    response = dumps(response)
                return HttpResponse(response, mimetype='application/javascript')
            except Exception, e:
                msg = ('Could not invoke action [%s], job_type:[%s], e:[%s], '
                    'req.POST:[%s], req.GET:[%s]') % (action, job_type, format_exc(),
                        pprint(req.POST), pprint(req.GET))
                logger.error(msg)
                return HttpResponseServerError(msg)

        return_data = {
            'zato_clusters': req.zato.clusters,
            'cluster_id': req.zato.cluster_id,
            'choose_cluster_form': req.zato.choose_cluster_form,
            'jobs': jobs,
            'friendly_names': job_type_friendly_names.items(),
            'create_one_time_form': OneTimeSchedulerJobForm(create_one_time_prefix, req),
            'create_interval_based_form': IntervalBasedSchedulerJobForm(create_interval_based_prefix, req),
            'create_cron_style_form': CronStyleSchedulerJobForm(create_cron_style_prefix, req),
            'edit_one_time_form': OneTimeSchedulerJobForm(edit_one_time_prefix, req),
            'edit_interval_based_form': IntervalBasedSchedulerJobForm(edit_interval_based_prefix, req),
            'edit_cron_style_form': CronStyleSchedulerJobForm(edit_cron_style_prefix, req),
            'sample_dt': get_sample_dt(req.zato.user_profile),
        }
        return_data.update(get_js_dt_format(req.zato.user_profile))

        return TemplateResponse(req, 'zato/scheduler.html', return_data)
def _on_config_EDIT_SQL_CONNECTION_POOL(self, params):
    """ Changes the parameters of an SQL connection pool. Unless only a change
    of the connection name is requested, the old connection pool is disposed of
    and a new one is created.
    """
    # Do not allow any reads or updates while the pools are being updated.
    self.is_get_allowed.clear()
    try:
        original_pool_name = params["original_pool_name"]
        pool_name = params["pool_name"]
        engine = params["engine"]
        user = params["user"]
        host = params["host"]
        db_name = params["db_name"]
        pool_size = int(params["pool_size"])
        extra = params.get("extra", {})
        extra = dict((str(key), extra[key]) for key in extra)

        old_pool = self.engines[original_pool_name]
        old_params = self.pool_list[original_pool_name]
        old_extra = old_params.get("extra", "")

        new_pool_list = copy.deepcopy(self.pool_list)
        pool_renamed = original_pool_name != pool_name

        # Are we changing the name of a pool only?
        if pool_renamed and (
            engine == old_params["engine"] and user == old_params["user"]
            and host == old_params["host"] and db_name == old_params["db_name"]
            and int(pool_size) == old_params["pool_size"] and extra == old_extra):

            self.logger.debug("Renaming SQL connection pool from [%s] to [%s]" % (original_pool_name, pool_name))
            new_pool_list[pool_name] = new_pool_list.pop(original_pool_name)

            # First save the changes on-disk.
            if not self.create_sa_engines:
                self.config_repo_manager.update_sql_pool_list(new_pool_list)

            self.engines[pool_name] = self.engines.pop(original_pool_name)
            self.pool_list = new_pool_list

            self.logger.info("SQL connection pool renamed from [%s] to [%s]" % (original_pool_name, pool_name))

        # .. nope, we need to create a new one with updated parameters.
        else:
            self.logger.debug("About to create a new pool with updated parameters.")

            password = old_params["password"]
            if password:
                password_decrypted = str(self.crypto_manager.decrypt(password))
            else:
                password_decrypted = ""

            new_params = copy.deepcopy(params)
            new_params["password"] = password_decrypted

            if pool_renamed:
                new_pool_name = pool_name
                new_pool_list.pop(original_pool_name)
                new_pool_list[new_pool_name] = {}
            else:
                new_pool_name = original_pool_name

            new_pool_list[new_pool_name]["engine"] = engine
            new_pool_list[new_pool_name]["user"] = user
            new_pool_list[new_pool_name]["password"] = password
            new_pool_list[new_pool_name]["host"] = host
            new_pool_list[new_pool_name]["db_name"] = db_name
            new_pool_list[new_pool_name]["pool_size"] = pool_size
            new_pool_list[new_pool_name]["extra"] = extra

            new_engine_url = engine_def.substitute(new_params)

            # First save the changes on-disk.
            if not self.create_sa_engines:
                # It's needed here to catch any incorrect extra arguments
                # passed in the URL. It will raise an exception if SingletonServer
                # is trying to create such an incorrect engine definition
                # and the request to create it will never reach the ParallelServers.
                create_engine(new_engine_url, pool_size=pool_size, **extra)
                self.config_repo_manager.update_sql_pool_list(new_pool_list)

            # .. dispose of the old engine.
            if self.create_sa_engines:
                self.engines[original_pool_name].dispose()
                new_engine = create_engine(new_engine_url, pool_size=pool_size, **extra)
            else:
                new_engine = _EngineInfo(new_engine_url, new_params)

            # .. are the new parameters to be saved under the same pool name?
            if pool_renamed:
                self.engines.pop(original_pool_name)
                self.engines[pool_name] = new_engine
            else:
                self.engines[original_pool_name] = new_engine

            # .. update the list of available pools.
            self.pool_list = new_pool_list

    except Exception, e:
        msg = "Could not update the SQL connection pool, params=[%s], e=[%s]" % (pprint(params), format_exc())
        self.logger.error(msg)
        raise ZatoException(msg)
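# A sketch of the 'params' dictionary the edit handler above reads; the keys
# match what the code accesses, the values are hypothetical. Sending a
# 'pool_name' that differs from 'original_pool_name' while keeping the other
# parameters unchanged takes the rename-only branch.
example_edit_params = {
    "original_pool_name": "crm",
    "pool_name": "crm-prod",
    "engine": "postgresql",           # hypothetical engine name
    "user": "zato",
    "host": "db.example.com",
    "db_name": "zato_crm",
    "pool_size": "10",
    "extra": {"pool_recycle": 3600},  # passed through to create_engine
}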
def _on_config_CREATE_SQL_CONNECTION_POOL(self, params):
    """ Creates a new SQL connection factory.
    """
    # Do not allow any reads or updates while the pools are being updated.
    self.is_get_allowed.clear()
    try:
        pool_name = params["pool_name"]
        engine = params["engine"]
        user = params["user"]
        host = params["host"]
        db_name = params["db_name"]
        pool_size = int(params["pool_size"])
        extra = params.get("extra", {})
        extra = dict((str(key), extra[key]) for key in extra)

        # The only place where we can check it, while no other updates
        # are allowed.
        pool_exists = self.pool_list.get(pool_name)

        if pool_exists:
            msg = "SQL connection pool [%s] already exists, list_id=[%s]." % (pool_name, id(self.pool_list))
            self.logger.error(msg)
            raise ZatoException(msg)
        else:
            msg = "SQL connection pool [%s] doesn't exist yet, list_id=[%s]." % (pool_name, id(self.pool_list))
            self.logger.log(TRACE1, msg)

        new_pool_list = copy.deepcopy(self.pool_list)
        new_pool_list[pool_name] = {}
        new_pool_list[pool_name]["engine"] = engine
        new_pool_list[pool_name]["user"] = user
        new_pool_list[pool_name]["host"] = host
        new_pool_list[pool_name]["db_name"] = db_name
        new_pool_list[pool_name]["pool_size"] = pool_size
        new_pool_list[pool_name]["extra"] = extra
        new_pool_list[pool_name]["password"] = ""  # No password yet.

        engine_url = engine_def.substitute(new_pool_list[pool_name])

        # First save the changes on-disk..
        if not self.create_sa_engines:
            # It's needed here to catch any incorrect extra arguments
            # passed in the URL. It will raise an exception if SingletonServer
            # is trying to create such an incorrect engine definition
            # and the request to create it will never reach the ParallelServers.
            create_engine(engine_url, pool_size=pool_size, **extra)
            self.config_repo_manager.update_sql_pool_list(new_pool_list)

        # .. create the engine ..
        if self.create_sa_engines:
            engine = create_engine(engine_url, pool_size=pool_size, **extra)
        else:
            engine = _EngineInfo(engine_url, params)

        # .. and update the list of pools and engines available.
        self.engines[pool_name] = engine
        self.pool_list = new_pool_list

    except Exception, e:
        msg = "Could not create the SQL connection pool, params=[%s], e=[%s]" % (pprint(params), format_exc())
        self.logger.error(msg)
        raise ZatoException(msg)
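# A sketch of the 'params' dictionary the create handler above expects; the
# keys match what the code reads, the values are hypothetical examples.
example_create_params = {
    "pool_name": "billing",
    "engine": "postgresql",   # hypothetical engine name
    "user": "zato",
    "host": "db.example.com",
    "db_name": "zato_billing",
    "pool_size": "5",
    "extra": {"echo": True},  # passed through to create_engine
}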
def index(req):
    """ Lists SQL connection pools and dispatches the management requests
    (create, edit and change_password).
    """
    zato_servers = Server.objects.all().order_by("name")
    pools = []
    config_pub_key = ""
    server_id = None
    choose_server_form = ChooseClusterForm(zato_servers, req.GET)
    edit_form = SQLConnectionPoolForm(prefix="edit")

    # Build a list of SQL connection pools for a given Zato server.
    server_id = req.GET.get("server")
    if server_id and req.method == "GET":

        # We have a server to pick the connection pools from, try to
        # invoke it now.
        server = Server.objects.get(id=server_id)
        _ignored, zato_message, soap_response = invoke_admin_service(server.address, "zato:pool.sql.get-list")

        # Config pub key is always needed.
        config_pub_key = zato_path("envelope.config_pub_key", True).get_from(zato_message)

        if zato_path("data.pool_list.pool").get_from(zato_message) is not None:
            for pool in zato_message.data.pool_list.pool:
                original_pool_name = unicode(pool.pool_name)
                pool_name = unicode(pool.pool_name)
                engine = unicode(pool.engine)
                engine_friendly = engine_friendly_name[str(engine)]
                user = unicode(pool.user)
                host = unicode(pool.host)
                db_name = unicode(pool.db_name)
                pool_size = pool.pool_size

                if path("pool.extra.item").get_from(pool) is not None:
                    logger.log(TRACE1, "Found 'extra.item' in the response, pool_name=[%s]" % pool_name)
                    extra = []
                    for extra_elem in pool.extra.item:
                        extra.append(unicode(extra_elem))
                    extra = "\n".join(extra)
                else:
                    logger.log(TRACE1, "No 'extra.item' found in the response, pool_name=[%s]" % pool_name)
                    extra = ""

                pool = SQLConnectionPool(uuid4().hex, original_pool_name, pool_name,
                    engine, engine_friendly, user, host, db_name, pool_size, extra)
                pools.append(pool)
        else:
            logger.info("No pools found, soap_response=[%s]" % soap_response)

    if req.method == "POST":

        action = req.POST.get("zato_action")
        if not action:
            msg = "Missing 'zato_action' parameter in req.POST"
            logger.error(msg)
            return HttpResponseServerError(msg)

        if not server_id:
            msg = "Parameter 'server' is missing in GET data."
            if action != "change_password":
                msg += " Action [%s], req.POST=[%s], req.GET=[%s]" % (action, pprint(req.POST), pprint(req.GET))
            logger.error(msg)
            return HttpResponseServerError(msg)

        server = Server.objects.get(id=server_id)

        handler = globals().get("_" + action)
        if not handler:
            msg = "No handler found for action [%s]." % action
            if action != "change_password":
                msg += " req.POST=[%s], req.GET=[%s]" % (pprint(req.POST), pprint(req.GET))
            logger.error(msg)
            return HttpResponseServerError(msg)

        # Finally, invoke the action handler.
        try:
            response = handler(server.address, req.POST)
            response = response if response else ""
            return HttpResponse(response)
        except Exception, e:
            msg = "Could not invoke action [%s], e=[%s]." % (action, format_exc())
            if action != "change_password":
                msg += " req.POST=[%s], req.GET=[%s]" % (pprint(req.POST), pprint(req.GET))
            logger.error(msg)
            return HttpResponseServerError(msg)
def index(req):
    try:
        jobs = []
        zato_clusters = req.odb.query(Cluster).order_by('name').all()
        choose_cluster_form = ChooseClusterForm(zato_clusters, req.GET)
        cluster_id = req.GET.get('cluster')

        # Build a list of schedulers for a given Zato cluster.
        if cluster_id and req.method == 'GET':

            # We have a server to pick the schedulers from, try to invoke it now.
            cluster = req.odb.query(Cluster).filter_by(id=cluster_id).first()

            zato_message = Element('{%s}zato_message' % zato_namespace)
            zato_message.data = Element('data')
            zato_message.data.cluster_id = cluster_id

            _, zato_message, soap_response = invoke_admin_service(cluster,
                'zato:scheduler.job.get-list', zato_message)

            if zato_path('data.definition_list.definition').get_from(zato_message) is not None:
                for job_elem in zato_message.data.definition_list.definition:

                    id = job_elem.id.text
                    name = job_elem.name.text
                    is_active = is_boolean(job_elem.is_active.text)
                    job_type = job_elem.job_type.text
                    start_date = job_elem.start_date.text
                    service_name = job_elem.service_name.text
                    extra = job_elem.extra.text if job_elem.extra.text else ''
                    job_type_friendly = job_type_friendly_names[job_type]

                    job = Job(id, name, is_active, job_type, start_date, extra,
                        service_name=service_name, job_type_friendly=job_type_friendly)

                    if job_type == 'one_time':
                        definition_text = _one_time_job_def(start_date)

                    elif job_type == 'interval_based':
                        definition_text = _interval_based_job_def(
                            _get_start_date(job_elem.start_date, scheduler_date_time_format),
                            job_elem.repeats, job_elem.weeks, job_elem.days,
                            job_elem.hours, job_elem.minutes, job_elem.seconds)

                        weeks = job_elem.weeks.text if job_elem.weeks.text else ''
                        days = job_elem.days.text if job_elem.days.text else ''
                        hours = job_elem.hours.text if job_elem.hours.text else ''
                        minutes = job_elem.minutes.text if job_elem.minutes.text else ''
                        seconds = job_elem.seconds.text if job_elem.seconds.text else ''
                        repeats = job_elem.repeats.text if job_elem.repeats.text else ''

                        ib_job = IntervalBasedJob(None, None, weeks, days, hours,
                            minutes, seconds, repeats)
                        job.interval_based = ib_job

                    elif job_type == 'cron_style':
                        cron_definition = (job_elem.cron_definition.text
                            if job_elem.cron_definition.text else '')
                        definition_text = _cron_style_job_def(start_date, cron_definition)

                        cs_job = CronStyleJob(None, None, cron_definition)
                        job.cron_style = cs_job

                    else:
                        msg = 'Unrecognized job type, name=[{0}], type=[{1}]'.format(name, job_type)
                        logger.error(msg)
                        raise ZatoException(msg)

                    job.definition_text = definition_text
                    jobs.append(job)
            else:
                logger.info('No jobs found, soap_response=[{0}]'.format(soap_response))

        if req.method == 'POST':

            action = req.POST.get('zato_action', '')
            if not action:
                msg = 'req.POST contains no [zato_action] parameter.'
                logger.error(msg)
                return HttpResponseServerError(msg)

            job_type = req.POST.get('job_type', '')
            if action != 'execute' and not job_type:
                msg = 'req.POST contains no [job_type] parameter.'
                logger.error(msg)
                return HttpResponseServerError(msg)

            job_name = req.POST['{0}-{1}-name'.format(action, job_type)]
            cluster = req.odb.query(Cluster).filter_by(id=cluster_id).one()

            # Try to match the action and a job type with an action handler..
            handler_name = '_' + action
            if action != 'execute':
                handler_name += '_' + job_type

            handler = globals().get(handler_name)
            if not handler:
                msg = ('No handler found for action [{0}], job_type=[{1}], '
                    'req.POST=[{2}], req.GET=[{3}].'.format(action, job_type,
                        pprint(req.POST), pprint(req.GET)))
                logger.error(msg)
                return HttpResponseServerError(msg)

            # .. invoke the action handler.
            try:
                response = handler(cluster, req.POST)
                response = response if response else ''
                if response:
                    response['message'] = _get_success_message(action, job_type, job_name)
                    response = dumps(response)
                return HttpResponse(response, mimetype='application/javascript')
            except Exception, e:
                msg = ('Could not invoke action [%s], job_type=[%s], e=[%s], '
                    'req.POST=[%s], req.GET=[%s]') % (action, job_type, format_exc(),
                        pprint(req.POST), pprint(req.GET))
                logger.error(msg)
                return HttpResponseServerError(msg)

        # TODO: Log the data returned here.
        logger.log(TRACE1, 'Returning render_to_response.')

        return render_to_response('zato/scheduler.html',
            {'zato_clusters': zato_clusters,
             'cluster_id': cluster_id,
             'choose_cluster_form': choose_cluster_form,
             'jobs': jobs,
             'friendly_names': job_type_friendly_names.items(),
             'create_one_time_form': OneTimeSchedulerJobForm(prefix=create_one_time_prefix),
             'create_interval_based_form': IntervalBasedSchedulerJobForm(prefix=create_interval_based_prefix),
             'create_cron_style_form': CronStyleSchedulerJobForm(prefix=create_cron_style_prefix),
             'edit_one_time_form': OneTimeSchedulerJobForm(prefix=edit_one_time_prefix),
             'edit_interval_based_form': IntervalBasedSchedulerJobForm(prefix=edit_interval_based_prefix),
             'edit_cron_style_form': CronStyleSchedulerJobForm(prefix=edit_cron_style_prefix),
            }, context_instance=RequestContext(req))