def _validate_tls(self, input, sec_info):
    """ Ensures that a TLS key/cert security definition is accompanied by a CA certs ID. """
    # Only TLS key/cert definitions need this check - anything else passes through.
    if sec_info['sec_type'] != SEC_DEF_TYPE.TLS_KEY_CERT:
        return

    if not input.get('sec_tls_ca_cert_id'):
        raise ZatoException(self.cid, 'TLS CA certs is a required field if TLS keys/certs are used')
def _invoke(self, name=None, payload='', headers=None, channel='invoke', data_format='json', transport=None,
    is_async=False, expiration=BROKER.DEFAULT_EXPIRATION, id=None, to_json=True, output_repeated=ZATO_NOT_GIVEN,
    pid=None, all_pids=False, timeout=None):
    """ Serializes an invocation request for a service addressed either by name or by ID
    and delegates the actual call to the base class's invoke method.

    Raises ZatoException if neither name nor id is given.
    """
    # Exactly one of name/id is required to address the target service
    if not(name or id):
        raise ZatoException(msg='Either name or id must be provided')

    # By convention, services whose name ends in 'list' produce repeated output -
    # applied only if the caller did not state a preference explicitly.
    if name and output_repeated == ZATO_NOT_GIVEN:
        output_repeated = name.lower().endswith('list')

    if to_json:
        payload = dumps(payload, default=default_json_handler)

    # Address the service by whichever of name/id was given
    id_, value = ('name', name) if name else ('id', id)

    request = {
        id_: value,
        # The payload travels Base64-encoded; under Python 3 it must be encoded to bytes first
        'payload': b64encode(payload.encode('utf8') if PY3 else payload),
        'channel': channel,
        'data_format': data_format,
        'transport': transport,
        'is_async': is_async,
        'expiration':expiration,
        'pid':pid,
        'all_pids': all_pids,
        'timeout': timeout,
    }

    # The whole request dict is serialized to JSON before being handed to the base class
    return super(AnyServiceInvoker, self).invoke(dumps(request, default=default_json_handler),
        ServiceInvokeResponse, is_async, headers, output_repeated)
def _get_slice_period_type(self, start, stop, orig_start, orig_stop):
    """ Returns information regarding whether a given period should be sliced
    by minutes, hours, days, months and/or years.

    Returns a two-element tuple of (delta, dict of predicate flags).
    Raises ZatoException if start is greater than stop.
    """
    start = Date(start)
    stop = Date(stop)

    if start > stop:
        msg = 'start:[{}] must not be greater than stop:[{}]'.format(orig_start, orig_stop)
        raise ZatoException(self.cid, msg)

    delta = stop - start

    # Initial classification purely by the period's length.
    # (The previous version first initialized all four flags to False and immediately
    # overwrote them - the dead assignments have been removed.)
    by_mins = delta.total_minutes <= 60
    by_hours_mins = delta.total_minutes > 60
    by_days_hours_mins = delta.total_days > 1
    by_months_days_hours_mins = delta.total_months > 1

    # Longer periods take precedence so that at most one flag stays True ..
    if any((by_days_hours_mins, by_months_days_hours_mins)):
        by_hours_mins = False

    # .. and months win over days.
    if by_months_days_hours_mins:
        by_days_hours_mins = False

    return delta, {
        'by_mins': by_mins,
        'by_hours_mins': by_hours_mins,
        'by_days_hours_mins': by_days_hours_mins,
        'by_months_days_hours_mins': by_months_days_hours_mins,
    }
def handle(self):
    """ Deletes a service from the ODB unless it is an internal one and deleting
    internal services has not been enabled in server configuration.
    """
    with closing(self.odb.session()) as session:
        try:
            service = session.query(Service).filter(Service.id==self.request.input.id).one() # type: Service

            may_delete_internal = is_boolean(self.server.fs_server_config.misc.internal_services_may_be_deleted)

            if service.is_internal and not may_delete_internal:
                raise ZatoException(self.cid,
                    "Can't delete service:[{}], it's an internal one and internal_services_may_be_deleted is not True".format(
                        service.name))

            # This will also cascade to delete the related DeployedService objects
            session.delete(service)
            session.commit()

            # Let the rest of the cluster know the service is gone
            self.broker_client.publish({
                'action': SERVICE.DELETE.value,
                'id': self.request.input.id,
                'name': service.name,
                'impl_name': service.impl_name,
                'is_internal': service.is_internal,
            })

        except Exception:
            session.rollback()
            self.logger.error('Service could not be deleted, e:`{}`'.format(format_exc()))
            raise
def _get_dict_item(self, id): """ Returns a dictionary entry by its ID. """ for item in self._get_dict_items(): if item['id'] == str(id): return item else: msg = 'Could not find the dictionary by its ID:`{}`'.format(id) raise ZatoException(self.cid, msg)
def invoke(self, target, *args, **kwargs):
    """ Invokes `target` and, if that fails, retries either asynchronously in the background
    (returning a CID) or synchronously, sleeping `retry_seconds` between attempts.

    Raises ZatoException if all blocking retries fail.
    """
    async_fallback, callback, callback_context, retry_repeats, retry_seconds, kwargs = self._get_retry_settings(
        target, **kwargs)

    # Let's invoke the service and find out if it works, maybe we don't need
    # to retry anything.
    kwargs['cid'] = kwargs.get('cid', new_cid())

    try:
        result = self.invoking_service.invoke(target, *args, **kwargs)
    except Exception:
        msg = 'Could not invoke:`{}`, cid:`{}`, e:`{}`'.format(
            target, self.invoking_service.cid, format_exc())
        logger.warn(msg)

        # How we handle the exception depends on whether the caller wants us
        # to block or prefers if we retry in background.
        if async_fallback:

            # .. invoke the background service and return CID to the caller.
            return self._invoke_async_retry(target, retry_repeats, retry_seconds, self.invoking_service.cid,
                kwargs['cid'], callback, callback_context, args, kwargs)

        # We are to block while repeating
        else:
            # Repeat the given number of times sleeping for as many seconds as we are told
            remaining = retry_repeats
            result = None

            while remaining > 1:
                try:
                    result = self.invoking_service.invoke(target, *args, **kwargs)
                except Exception as e:
                    msg = retry_failed_msg(
                        (retry_repeats - remaining) + 1, retry_repeats, target, retry_seconds,
                        self.invoking_service.cid, e)
                    logger.info(msg)
                    sleep(retry_seconds)
                    remaining -= 1
                else:
                    # Bug fix: previously a successful retry did not leave the loop,
                    # so the target kept being re-invoked until an exception occurred.
                    break

            # OK, give up now, there's nothing more we can do.
            # NOTE: a legitimately falsy response is indistinguishable from failure here -
            # preserved from the original behaviour.
            if not result:
                msg = retry_limit_reached_msg(retry_repeats, target, retry_seconds, self.invoking_service.cid)
                raise ZatoException(self.invoking_service.cid, msg)

            # Bug fix: a successful retry's result was previously never returned
            return result
    else:
        # All good, simply return the response
        return result
def get_slices(self, orig_start, orig_stop):
    """ Slices the time range into a series of per-minute/-hour/-day/-month or -year statistics. """
    slices = []
    start = parse_datetime(orig_start)
    stop = parse_datetime(orig_stop)

    delta, result = self._get_slice_period_type(start, stop, orig_start, orig_stop)

    by_mins = result['by_mins']
    by_hours_mins = result['by_hours_mins']
    by_days_hours_mins = result['by_days_hours_mins']
    by_months_days_hours_mins = result['by_months_days_hours_mins']

    # Sanity check - at most one of the predicates may be True.
    predicates = (by_mins, by_hours_mins, by_days_hours_mins, by_months_days_hours_mins)
    sum_preds = sum(int(elem) for elem in predicates)
    if sum_preds > 1:
        msg = 'sum:[{}] of predicates:[{}] is > 1, delta:[{}, {} {} {} {}], start:[{}], stop:[{}]'.format(
            sum_preds, predicates, delta, delta.years, delta.months, delta.days, delta.hours, start, stop)
        raise ZatoException(self.cid, msg)

    # Start and stop need to be at least that many minutes apart,
    # with start living farther in the past.
    if by_mins and delta.total_minutes < self.MINIMUM_DIFFERENCE:
        raise ValueError(
            'stop and start must be at least [{}] minutes apart, start must be '
            'farther in past; start:[{}], stop:[{}]'.format(
                self.MINIMUM_DIFFERENCE, orig_start, orig_stop))

    if by_mins:
        # start=2012-10-23T20:13:00, stop=2012-10-23T21:07:00
        slices.append(self.by_minutes(start, stop))
    elif by_hours_mins:
        slices.extend(self._get_slices_by_hours(start, stop, delta))
    elif by_days_hours_mins or (by_months_days_hours_mins and delta.total_months == 1):
        slices.extend(self._get_slices_by_days(start, stop, delta))
    elif by_months_days_hours_mins:
        slices.extend(self._get_slices_by_months(start, stop, delta))
    else:
        slices.extend(self._get_slices_by_years(start, stop, delta))

    return slices
def _validate_entry(self, validate_item, id=None): for elem in ('system', 'key'): name = self.request.input[elem] match = self.NAME_RE.match(name) if match and match.group() == name: continue else: msg = 'System and key may contain only letters, digits and an underscore, failed to validate `{}` ' + \ 'against the regular expression {}'.format(name, self.NAME_PATTERN) raise ZatoException(self.cid, msg) for item in self._get_dict_items(): joined = KVDB.SEPARATOR.join( (item['system'], item['key'], item['value'])) if validate_item == joined and id != item['id']: msg = 'The triple of system:`{}`, key:`{}`, value:`{}` already exists'.format( item['system'], item['key'], item['value']) raise ZatoException(self.cid, msg) return True
def remote_command_execute(req):
    """ Executes a command against the key/value DB. """
    try:
        response = req.zato.client.invoke('zato.kvdb.remote-command.execute', {'command': req.POST['command']})

        # No data means the backend reported an error
        if not response.has_data:
            raise ZatoException(msg=response.details)

        return HttpResponse(dumps({'message': dumps(response.data.result)}), content_type='application/javascript')
    except Exception as e:
        return HttpResponseServerError(e.args[0])
def edit(req):
    """ Updates an existing HTTP/SOAP object. """
    try:
        response = req.zato.client.invoke('zato.http-soap.edit', _get_edit_create_message(req.POST, 'edit-'))

        # No data means the backend reported an error
        if not response.has_data:
            raise ZatoException(msg=response.details)

        return _edit_create_response(req, response.data.id, 'updated', req.POST['transport'],
            req.POST['connection'], req.POST['edit-name'])
    except Exception:
        msg = 'Update error, e:`{}`'.format(format_exc())
        logger.error(msg)
        return HttpResponseServerError(msg)
def create(req):
    """ Creates a new HTTP/SOAP object. """
    try:
        response = req.zato.client.invoke('zato.http-soap.create', _get_edit_create_message(req.POST))

        # No data means the backend reported an error
        if not response.has_data:
            raise ZatoException(msg=response.details)

        return _edit_create_response(req, response.data.id, 'created', req.POST['transport'],
            req.POST['connection'], req.POST['name'])
    except Exception:
        msg = 'Object could not be created, e:`{}`'.format(format_exc())
        logger.error(msg)
        return HttpResponseServerError(msg)
def yield_top_n(self, n, n_type, stats_elems):
    """ Yields the top N services from `stats_elems`, ranked by their `n_type` attribute
    in descending order, ties broken lexicographically by service name.

    Raises ZatoException if n_type is not given.
    """
    if not n_type:
        msg = 'n_type must not be None if n is neither, n:[{}], n_type:[{}]'.format(n, n_type)
        self.logger.error(msg)
        raise ZatoException(self.cid, msg)

    else:
        # Map each service name to the value of the requested attribute, defaulting to 0.
        # (Previously a dict.fromkeys(..., 0) was built and then fully overwritten - redundant.)
        data = {name: getattr(elem, n_type) or 0 for name, elem in stats_elems.items()}

        # It's itemgetter(1, 0) because idx=1 is the actual value and idx=0 is a name,
        # so nlargest returns entries in descending order of value while also sorting
        # lexicographically services that happen to have equal values.
        names = nlargest(n, data.items(), key=itemgetter(1, 0))

        for name, value in names:
            yield stats_elems[name]
def handle(self):
    """ Parses and executes a raw command against the server's key/value DB (Redis),
    storing the command's output - or a parsing error message - in the response payload.

    Raises ZatoException if no command was given on input.
    """
    input_command = self.request.input.command or ''

    if not input_command:
        msg = 'No command sent'
        raise ZatoException(self.cid, msg)

    try:
        # Validate the command against the grammar before executing anything
        parse_result = redis_grammar.parseString(input_command)

        options = {}
        command = parse_result.command
        parameters = parse_result.parameters if parse_result.parameters else []
        parameters = self._fixup_parameters(parameters)

        # CONFIG and OBJECT need their sub-command passed as an option to the client library
        if command == 'CONFIG':
            options['parse'] = parameters[0]
        elif command == 'OBJECT':
            options['infotype'] = parameters[0]

        response = self.server.kvdb.conn.execute_command(command, *parameters, **options) or ''

        # Normalize the response to something serializable, depending on the command type.
        # NOTE(review): `unicode` presumably comes from py2/3 compatibility imports - confirm.
        if response and command in ('KEYS', 'HKEYS', 'HVALS'):
            response = unicode(response).encode('utf-8')

        elif command in ('HLEN', 'LLEN', 'LRANGE', 'SMEMBERS', 'HGETALL'):
            response = str(response)

        elif command == 'DUMP':
            response = repr(response)

        self.response.payload.result = response or '(None)'

    except Exception as e:
        # Parsing or execution failed - report the error in the payload instead of raising
        msg = 'Command parsing error, command:`{}`, e:`{}`'.format(input_command, e.args[0])
        self.logger.error('msg:`%s`, e:`%s`', msg, format_exc())
        self.response.payload.result = msg
def _get_item_ids(self): """ Returns IDs of the dictionary entries used in the translation. """ item_ids = {'id1': None, 'id2': None} for idx in ('1', '2'): system = self.request.input.get('system' + idx) key = self.request.input.get('key' + idx) value = self.request.input.get('value' + idx) item_ids['id' + idx] = self._get_dict_item_id(system, key, value) # This is a sanity check, in theory the input data can't possibly be outside # of what's in the KVDB.DICTIONARY_ITEM key for idx in ('1', '2'): if not item_ids['id' + idx]: msg = 'Could not find the ID for system:[{}], key:[{}], value:[{}]'.format( self.request.input.get('system' + idx), self.request.input.get('key' + idx), self.request.input.get('value' + idx)) raise ZatoException(self.cid, msg) return item_ids
def handle(self):
    """ Deletes a server from the ODB - a server is not allowed to delete itself. """
    with closing(self.odb.session()) as session:
        try:
            server = session.query(Server).filter(Server.id==self.request.input.id).one()

            # Sanity check - refuse to let the current server remove its own definition
            if server.id == self.server.id:
                msg = 'A server cannot delete itself, id:`{}`, name:`{}`'.format(server.id, server.name)
                self.logger.error(msg)
                raise ZatoException(self.cid, msg)

            # This will cascade and delete every related object
            session.delete(server)
            session.commit()

        except Exception:
            session.rollback()
            self.logger.error('Could not delete the server, e:`{}`'.format(format_exc()))
            raise
def handle(self):
    """ Invokes a service - addressed by name or ID - either synchronously or asynchronously,
    optionally on a particular server process (PID) or on all processes, and stores the
    Base64-encoded response in the payload.

    Raises ZatoException if both id and name are given.
    """
    payload = self.request.input.get('payload')
    if payload:
        payload = b64decode(payload)
        payload = payload_from_request(self.cid, payload, self.request.input.data_format,
            self.request.input.transport)

    id = self.request.input.get('id')
    name = self.request.input.get('name')
    pid = self.request.input.get('pid') or 0
    all_pids = self.request.input.get('all_pids')
    timeout = self.request.input.get('timeout') or None

    channel = self.request.input.get('channel')
    data_format = self.request.input.get('data_format')
    transport = self.request.input.get('transport')
    expiration = self.request.input.get('expiration') or BROKER.DEFAULT_EXPIRATION

    if name and id:
        # Bug fix: the message was previously passed as the first positional argument,
        # i.e. into the cid slot, so the actual error message was lost.
        raise ZatoException(self.cid, 'Cannot accept both id:`{}` and name:`{}`'.format(id, name))

    if self.request.input.get('is_async'):

        # Async invocations are always by name - resolve an ID to a name first
        if id:
            impl_name = self.server.service_store.id_to_impl_name[id]
            name = self.server.service_store.service_data(impl_name)['name']

        # If PID is given on input it means we must invoke this particular server process by it ID
        if pid and pid != self.server.pid:
            response = self.server.invoke_by_pid(name, payload, pid)
        else:
            response = self.invoke_async(name, payload, channel, data_format, transport, expiration)

    else:

        # It is possible that we were given the all_pids flag on input but we know
        # ourselves that there is only one process, the current one, so we can just
        # invoke it directly instead of going through IPC.
        if all_pids and self.server.fs_server_config.main.gunicorn_workers > 1:
            use_all_pids = True
        else:
            use_all_pids = False

        if use_all_pids:
            args = (name, payload, timeout) if timeout else (name, payload)
            response = dumps(self.server.invoke_all_pids(*args))
        else:
            # Either a specific other process, or the current one
            if pid and pid != self.server.pid:
                response = self.server.invoke(name, payload, pid=pid, data_format=data_format)
            else:
                func, id_ = (self.invoke, name) if name else (self.invoke_by_id, id)
                response = func(id_, payload, channel, data_format, transport, serialize=True)

    # Make sure a non-empty string response is bytes before Base64-encoding it
    if isinstance(response, basestring):
        if response:
            response = response if isinstance(response, bytes) else response.encode('utf8')

    self.response.payload.response = b64encode(response).decode('utf8') if response else ''
def _exception():
    # Logs and raises on a duplicate translation mapping. This is a closure, not a method -
    # system1, key1, value1, system2, key2 and self all come from the enclosing scope.
    msg = 'A mapping between system1:[{}], key1:[{}], value1:[{}] and system2:[{}], key2:[{}] already exists'.format(
        system1, key1, value1, system2, key2)
    self.logger.error(msg)
    raise ZatoException(self.cid, msg)
def validate_extra(self, cid, extra):
    """ Confirms that `extra`, if given at all, contains at least one key=value pair. """
    # An empty value is fine - there is simply nothing to validate
    if not extra:
        return

    if '=' not in extra:
        raise ZatoException(cid,
            'extra should be a list of key=value parameters, possibly one-element long, instead of `{}`'.format(extra))
def index(req):
    """ Scheduler admin view. On GET, lists all jobs in the cluster, wrapping each in a
    Job object together with its type-specific (interval-based/cron-style) details.
    On POST, dispatches the submitted action to a handler function resolved by name
    from this module's globals. Any error results in an HTTP 500 response.
    """
    try:
        jobs = []
        meta = None

        # Build a list of schedulers for a given Zato cluster.
        if req.zato.cluster_id and req.method == 'GET':

            # We have a cluster to pick the schedulers from, try to invoke it now.
            request = {
                'cluster_id': req.zato.cluster_id,
                'paginate': True,
                'cur_page': req.GET.get('cur_page', 1),
                'query': req.GET.get('query', '')
            }

            data, meta = parse_response_data(req.zato.client.invoke('zato.scheduler.job.get-list', request))

            for job_elem in data:

                id = job_elem.id
                name = job_elem.name
                is_active = job_elem.is_active
                job_type = job_elem.job_type
                start_date = job_elem.start_date
                service_name = job_elem.service_name
                extra = job_elem.extra
                job_type_friendly = job_type_friendly_names[job_type]

                # Start dates are stored in UTC - convert to the user's timezone for display
                job = Job(id, name, is_active, job_type,
                    from_utc_to_user(start_date+'+00:00', req.zato.user_profile),
                    extra, service_name=service_name, job_type_friendly=job_type_friendly)

                if job_type == SCHEDULER.JOB_TYPE.ONE_TIME:
                    definition_text=_one_time_job_def(req.zato.user_profile, start_date)

                elif job_type == SCHEDULER.JOB_TYPE.INTERVAL_BASED:
                    definition_text = _interval_based_job_def(req.zato.user_profile,
                        _get_start_date(job_elem.start_date),
                        job_elem.repeats, job_elem.weeks, job_elem.days,
                        job_elem.hours, job_elem.minutes, job_elem.seconds)

                    # Interval attributes may be None - normalize to '' for the form
                    weeks = job_elem.weeks or ''
                    days = job_elem.days or ''
                    hours = job_elem.hours or ''
                    minutes = job_elem.minutes or ''
                    seconds = job_elem.seconds or ''
                    repeats = job_elem.repeats or ''

                    ib_job = IntervalBasedJob(None, None, weeks, days, hours, minutes, seconds, repeats)
                    job.interval_based = ib_job

                elif job_type == SCHEDULER.JOB_TYPE.CRON_STYLE:
                    cron_definition = job_elem.cron_definition or ''
                    definition_text=_cron_style_job_def(req.zato.user_profile, start_date, cron_definition)

                    cs_job = CronStyleJob(None, None, cron_definition)
                    job.cron_style = cs_job

                else:
                    msg = 'Unrecognized job type, name:`{}`, type:`{}`'.format(name, job_type)
                    logger.error(msg)
                    # NOTE(review): msg is passed positionally - if ZatoException's first
                    # parameter is cid, the message lands in the cid slot; confirm.
                    raise ZatoException(msg)

                job.definition_text = definition_text
                jobs.append(job)

        if req.method == 'POST':

            action = req.POST.get('zato_action', '')
            if not action:
                msg = 'req.POST contains no [zato_action] parameter.'
                logger.error(msg)
                return HttpResponseServerError(msg)

            job_type = req.POST.get('job_type', '')
            if action != 'execute' and not job_type:
                msg = 'req.POST contains no [job_type] parameter.'
                logger.error(msg)
                return HttpResponseServerError(msg)

            job_name = req.POST['{0}-{1}-name'.format(action, job_type)]

            # Try to match the action and a job type with an action handler..
            handler_name = '_' + action
            if action != 'execute':
                handler_name += '_' + job_type

            handler = globals().get(handler_name)
            if not handler:
                msg = ('No handler found for action [{0}], job_type:[{1}], '
                    'req.POST:[{2}], req.GET:[{3}].'.format(action, job_type, pprint(req.POST), pprint(req.GET)))
                logger.error(msg)
                return HttpResponseServerError(msg)

            # .. invoke the action handler.
            try:
                response = handler(req.zato.client, req.zato.user_profile, req.zato.cluster, req.POST)
                response = response if response else ''
                if response:
                    response['message'] = _get_success_message(action, job_type, job_name)
                    response = dumps(response)
                return HttpResponse(response, content_type='application/javascript')
            except Exception:
                msg = 'Could not invoke action `{}`, job_type:`{}`, e:`{}` req.POST:`{}`, req.GET:`{}'.format(
                    action, job_type, format_exc(), req.POST, req.GET)
                logger.error(msg)
                return HttpResponseServerError(msg)

        template_name = 'zato/scheduler.html'

        return_data = {'zato_clusters':req.zato.clusters,
            'cluster_id':req.zato.cluster_id,
            'search_form':req.zato.search_form,
            'jobs':jobs,
            'friendly_names':job_type_friendly_names.items(),
            'create_one_time_form':OneTimeSchedulerJobForm(create_one_time_prefix, req),
            'create_interval_based_form':IntervalBasedSchedulerJobForm(create_interval_based_prefix, req),
            'create_cron_style_form':CronStyleSchedulerJobForm(create_cron_style_prefix, req),
            'edit_one_time_form':OneTimeSchedulerJobForm(edit_one_time_prefix, req),
            'edit_interval_based_form':IntervalBasedSchedulerJobForm(edit_interval_based_prefix, req),
            'edit_cron_style_form':CronStyleSchedulerJobForm(edit_cron_style_prefix, req),
            'paginate':True,
            'meta': meta,
            'req': req,
            'zato_template_name': template_name,
        }

        return_data.update(get_js_dt_format(req.zato.user_profile))

        return TemplateResponse(req, template_name, return_data)

    except Exception:
        msg = '<pre>Method could not be invoked, e:`{}`</pre>'.format(format_exc())
        logger.error(msg)
        return HttpResponseServerError(msg)
def _create_edit(action, cid, input, payload, logger, session, broker_client, response):
    """ Creating and updating a job requires a series of very similar steps so they've been all put here
    and depending on the 'action' parameter (be it 'create'/'edit') some additional operations are performed.

    Validates the job type, name uniqueness and target service, persists the Job (plus its
    interval-based or cron-style companion row, if any), publishes the change to the scheduler
    via the broker and fills in the response payload. Rolls back the session and re-raises
    on any error.
    """
    job_type = input.job_type
    cluster_id = input.cluster_id
    name = input.name
    service_name = input.service

    cluster = session.query(Cluster).\
        filter(Cluster.id==cluster_id).\
        one()

    if job_type not in(SCHEDULER.JOB_TYPE.ONE_TIME, SCHEDULER.JOB_TYPE.INTERVAL_BASED, SCHEDULER.JOB_TYPE.CRON_STYLE):
        msg = 'Unrecognized job type [{0}]'.format(job_type)
        logger.error(msg)
        raise ZatoException(cid, msg)

    # For finding out if we don't have a job of that name already defined.
    existing_one_base = session.query(Job).\
        filter(Cluster.id==cluster_id).\
        filter(Job.name==name)

    if action == 'create':
        existing_one = existing_one_base.\
            first()
    else:
        # When editing, a job may of course keep its own name
        job_id = input.id
        existing_one = existing_one_base.\
            filter(Job.id != job_id).\
            first()

    if existing_one:
        raise ZatoException(cid, 'Job `{}` already exists on this cluster'.format(name))

    # Is the service's name correct?
    service = session.query(Service).\
        filter(Cluster.id==cluster_id).\
        filter(Service.cluster_id==Cluster.id).\
        filter(Service.name==service_name).\
        first()

    if not service:
        msg = 'Service `{}` does not exist on this cluster'.format(service_name)
        logger.error(msg)
        raise ZatoException(cid, msg)

    # We can create/edit a base Job object now and - optionally - another one
    # if the job type's is either interval-based or Cron-style. The base
    # instance will be enough if it's a one-time job.

    extra = (input.extra or u'').encode('utf-8')
    is_active = input.is_active
    start_date = parse_datetime(input.start_date)

    if action == 'create':
        job = Job(None, name, is_active, job_type, start_date, extra, cluster=cluster, service=service)
    else:
        job = session.query(Job).filter_by(id=job_id).one()
        old_name = job.name
        job.name = name
        job.is_active = is_active
        job.start_date = start_date
        job.service = service
        job.extra = extra

    try:
        # Add but don't commit yet.
        session.add(job)

        if job_type == SCHEDULER.JOB_TYPE.INTERVAL_BASED:
            ib_params = ('weeks', 'days', 'hours', 'minutes', 'seconds')
            if not any(input[key] for key in ib_params):
                msg = "At least one of ['weeks', 'days', 'hours', 'minutes', 'seconds'] must be given"
                logger.error(msg)
                raise ZatoException(cid, msg)

            if action == 'create':
                ib_job = IntervalBasedJob(None, job)
            else:
                ib_job = session.query(IntervalBasedJob).filter_by(id=job.interval_based.id).one()

            for param in ib_params + ('repeats',):
                value = input[param] or None
                if value != ZATO_NONE:
                    setattr(ib_job, param, value)

            # NOTE(review): the loop above already covers 'repeats', so the two
            # statements below look redundant - confirm before removing.
            value = input['repeats'] or None
            if value != ZATO_NONE:
                setattr(ib_job, 'repeats', value)

            session.add(ib_job)

        elif job_type == SCHEDULER.JOB_TYPE.CRON_STYLE:
            cron_definition = input.cron_definition.strip()

            # Just to make sure it's syntactically correct
            CronTab(cron_definition).next(default_utc=False)

            if action == 'create':
                cs_job = CronStyleJob(None, job)
            else:
                cs_job = session.query(CronStyleJob).filter_by(id=job.cron_style.id).one()

            cs_job.cron_definition = cron_definition
            session.add(cs_job)

        # We can commit it all now.
        session.commit()

        # Now send it to the broker, but only if the job is active.
        # if is_active:
        msg_action = SCHEDULER_MSG.CREATE.value if action == 'create' else SCHEDULER_MSG.EDIT.value

        msg = {'action': msg_action, 'job_type': job_type,
            'is_active':is_active, 'start_date':start_date.isoformat(),
            'extra':extra.decode('utf8'), 'service': service.name,
            'id':job.id, 'name': name
        }

        if action == 'edit':
            msg['old_name'] = old_name

        # Scheduler-side code expects interval values to be integers
        if job_type == SCHEDULER.JOB_TYPE.INTERVAL_BASED:
            for param in ib_params + ('repeats',):
                value = input[param]
                msg[param] = int(value) if value else 0
        elif job_type == SCHEDULER.JOB_TYPE.CRON_STYLE:
            msg['cron_definition'] = cron_definition

        broker_client.publish(msg, MESSAGE_TYPE.TO_SCHEDULER)

    except Exception:
        session.rollback()
        logger.error('Could not complete the request, e:`%s`', format_exc())
        raise

    else:
        response.payload.id = job.id
        response.payload.name = input.name

        if job_type == SCHEDULER.JOB_TYPE.CRON_STYLE:
            # Needs to be returned because we might've been performing
            # a substitution like changing '@hourly' into '0 * * * *'.
            response.payload.cron_definition = cs_job.cron_definition
def __call__(self, req, initial_input_dict={}, initial_return_data={}, *args, **kwargs):
    """ Handles the request, taking care of common things and delegating control to the subclass
    for fetching this view-specific data. Builds the input dictionary out of SimpleIO
    declarations and POST data, invokes the backend service and returns its output as JSON.

    NOTE(review): the mutable default arguments (initial_input_dict/initial_return_data)
    are shared across calls if any callee mutates them - confirm this is intended.
    """
    self.input_dict.clear()
    self.clear_user_message()

    try:
        super(CreateEdit, self).__call__(req, *args, **kwargs)
        self.set_input()
        self.populate_initial_input_dict(initial_input_dict)

        input_dict = {'cluster_id': self.cluster_id}
        post_id = self.req.POST.get('id')
        if post_id:
            input_dict['id'] = post_id
        input_dict.update(initial_input_dict)

        # Collect every SimpleIO input parameter that was not provided through
        # the initial dictionaries, letting subclasses pre-process each value.
        for name in chain(self.SimpleIO.input_required, self.SimpleIO.input_optional):
            if name not in input_dict and name not in self.input_dict:
                value = self.input.get(name)
                value = self.pre_process_item(name, value)
                if value != SKIP_VALUE:
                    input_dict[name] = value

        self.input_dict.update(input_dict)
        self.pre_process_input_dict(self.input_dict)

        logger.info('Request self.input_dict %s', self.input_dict)
        logger.info('Request self.SimpleIO.input_required %s', self.SimpleIO.input_required)
        logger.info('Request self.SimpleIO.input_optional %s', self.SimpleIO.input_optional)
        logger.info('Request self.input %s', self.input)
        logger.info('Request self.req.GET %s', self.req.GET)
        logger.info('Request self.req.POST %s', self.req.POST)

        logger.info('Sending `%s` to `%s`', self.input_dict, self.service_name)

        response = self.req.zato.client.invoke(self.service_name, self.input_dict)

        if response.ok:
            return_data = {'message': self.success_message(response.data)}
            return_data.update(initial_return_data)

            # Copy the SimpleIO output attributes from the service response,
            # stringifying truthy values for the frontend.
            for name in chain(self.SimpleIO.output_optional, self.SimpleIO.output_required):
                if name not in initial_return_data:
                    value = getattr(response.data, name, None)
                    if value:
                        value = str(value)
                    return_data[name] = value

            self.post_process_return_data(return_data)

            logger.info('CreateEdit data for frontend `%s`', return_data)

            return HttpResponse(dumps(return_data), content_type='application/javascript')
        else:
            msg = 'response:`{}`, details.response.details:`{}`'.format(response, response.details)
            logger.error(msg)
            raise ZatoException(msg=msg)

    except Exception:
        return HttpResponseServerError(format_exc())