def wes_collect_attachments(self, run_id):
    """Collect a multipart WES run submission from the current connexion request.

    Saves every "workflow_attachment" upload into a fresh run-specific temp
    directory and parses the form fields into a request body dict.

    :param run_id: id of the run; used in the temp-directory prefix.
    :returns: (tempdir, body) where body holds at least "workflow_params"
        (resolved job order object) and "workflow_url" (absolute path inside
        tempdir).
    :raises ValueError: on any attachment/form processing error, or when
        "workflow_params" / "workflow_url" are missing from the submission.
    """
    # Run-specific scratch dir under the configured cwl tmp folder.
    tempdir = tempfile.mkdtemp(
        dir=get_dir(
            path.abspath(
                conf_get("cwl", "tmp_folder",
                         path.join(AIRFLOW_HOME, "cwl_tmp_folder")))),
        prefix="run_id_" + run_id + "_")
    logging.debug(f"Save all attached files to {tempdir}")
    for k, ls in iterlists(connexion.request.files):
        logging.debug(f"Process attachment parameter {k}")
        if k == "workflow_attachment":
            for v in ls:
                try:
                    logging.debug(f"Process attached file {v}")
                    # Sanitize every path segment; drop "", "." and ".."
                    # so an attachment cannot escape tempdir.
                    sp = v.filename.split("/")
                    fn = []
                    for p in sp:
                        if p not in ("", ".", ".."):
                            fn.append(secure_filename(p))
                    dest = path.join(tempdir, *fn)
                    if not path.isdir(path.dirname(dest)):
                        # presumably get_dir creates the directory — TODO confirm
                        get_dir(path.dirname(dest))
                    logging.debug(f"Save {v.filename} to {dest}")
                    v.save(dest)
                except Exception as err:
                    raise ValueError(
                        f"Failed to process attached file {v}, {err}")
    body = {}
    for k, ls in iterlists(connexion.request.form):
        logging.debug(f"Process form parameter {k}")
        for v in ls:
            try:
                if not v:
                    continue
                if k == "workflow_params":
                    # Persist the params as job.json, then resolve any
                    # references through the cwltool job loader.
                    job_file = path.join(tempdir, "job.json")
                    with open(job_file, "w") as f:
                        json.dump(json.loads(v), f, indent=4)
                    logging.debug(f"Save job file to {job_file}")
                    loader = Loader(load.jobloaderctx.copy())
                    job_order_object, _ = loader.resolve_ref(
                        job_file, checklinks=False)
                    body[k] = job_order_object
                else:
                    body[k] = v
            except Exception as err:
                raise ValueError(
                    f"Failed to process form parameter {k}, {v}, {err}")
    if "workflow_params" not in body or "workflow_url" not in body:
        raise ValueError(
            "Missing 'workflow_params' or 'workflow_url' in submission")
    # workflow_url is expected to name one of the staged attachments.
    body["workflow_url"] = path.join(tempdir,
                                     secure_filename(body["workflow_url"]))
    return tempdir, body
def collect_attachments(self, run_id=None):
    """Stage uploaded files and parse form fields for a WES run request.

    Files under "workflow_attachment" are saved (path-sanitized) into a new
    temp dir; JSON-valued fields are decoded; a relative "workflow_url" is
    rewritten to a file:// URL inside the temp dir.

    :returns: (tempdir, body)
    :raises ValueError: on unreadable parameters or missing required fields.
    """
    tempdir = tempfile.mkdtemp()
    body = {}
    has_attachments = False
    for k, ls in iterlists(connexion.request.files):
        try:
            for v in ls:
                if k == "workflow_attachment":
                    # Sanitize each segment; "", "." and ".." are dropped so
                    # an attachment cannot escape tempdir.
                    sp = v.filename.split("/")
                    fn = []
                    for p in sp:
                        if p not in ("", ".", ".."):
                            fn.append(secure_filename(p))
                    dest = os.path.join(tempdir, *fn)
                    if not os.path.isdir(os.path.dirname(dest)):
                        os.makedirs(os.path.dirname(dest))
                    self.log_for_run(run_id,
                                     "Staging attachment '%s' to '%s'"
                                     % (v.filename, dest))
                    v.save(dest)
                    has_attachments = True
                    body[k] = "file://%s" % tempdir  # Reference to temp working dir.
                elif k in ("workflow_params", "tags",
                           "workflow_engine_parameters"):
                    content = v.read()
                    body[k] = json.loads(content.decode("utf-8"))
                else:
                    body[k] = v.read().decode()
        except Exception as e:
            raise ValueError("Error reading parameter '%s': %s" % (k, e))
    for k, ls in iterlists(connexion.request.form):
        try:
            for v in ls:
                if not v:
                    continue
                if k in ("workflow_params", "tags",
                         "workflow_engine_parameters"):
                    body[k] = json.loads(v)
                else:
                    body[k] = v
        except Exception as e:
            raise ValueError("Error reading parameter '%s': %s" % (k, e))
    if "workflow_url" in body:
        if ":" not in body["workflow_url"]:
            # A relative URL must refer to one of the staged attachments.
            if not has_attachments:
                raise ValueError(
                    "Relative 'workflow_url' but missing 'workflow_attachment'")
            body["workflow_url"] = "file://%s" % os.path.join(
                tempdir, secure_filename(body["workflow_url"]))
        self.log_for_run(run_id,
                         "Using workflow_url '%s'" % body.get("workflow_url"))
    else:
        raise ValueError("Missing 'workflow_url' in submission")
    if "workflow_params" not in body:
        raise ValueError("Missing 'workflow_params' in submission")
    return tempdir, body
def type_convert(self, obj):
    """Convert a multi-valued mapping (e.g. QueryDict/Headers) to a plain
    dict, coercing each value per the JSON-schema property types found in
    ``self.validator.schema['properties']``.

    Properties with no (or unknown) declared type fall back to taking the
    first value unchanged.
    """
    if obj is None:
        return None
    # Already a plain structure (and not a MultiDict): nothing to convert.
    if isinstance(obj, (dict, list)) and not isinstance(obj, MultiDict):
        return obj
    if isinstance(obj, Headers):
        obj = MultiDict(six.iteritems(obj))
    result = dict()
    # Scalar converters keyed by JSON-schema type; each takes the list of
    # raw values and uses the first one.
    convert_funs = {
        'integer': lambda v: int(v[0]),
        'boolean': lambda v: v[0].lower() not in ['n', 'no', 'false', '', '0'],
        'null': lambda v: None,
        'number': lambda v: float(v[0]),
        'string': lambda v: v[0]
    }

    def convert_array(type_, v):
        # Convert each element with the scalar converter of the item type.
        func = convert_funs.get(type_, lambda v: v[0])
        return [func([i]) for i in v]

    for k, values in six.iterlists(obj):
        prop = self.validator.schema['properties'].get(k, {})
        type_ = prop.get('type')
        fun = convert_funs.get(type_, lambda v: v[0])
        if type_ == 'array':
            item_type = prop.get('items', {}).get('type')
            result[k] = convert_array(item_type, values)
        else:
            result[k] = fun(values)
    return result
def export(request, type):
    """Export the queried data as an xlsx attachment.

    :param request: Django request; GET params are forwarded to get_df.
    :param type: 'pivoted' for the pivot table, 'raw' for the raw data.
    :returns: HttpResponse carrying a timestamp-named .xlsx attachment.
    :raises ValueError: on an unknown export type (previously an unknown
        type fell through and crashed later with UnboundLocalError).
    """
    form_dict = dict(six.iterlists(request.GET))
    if type == 'pivoted':
        df = get_df(form_dict)  # pivoted data
        # NOTE(review): aggfunc/value look like module-level globals set
        # elsewhere — confirm they are defined at call time.
        sheet_name = aggfunc + '(' + value + ')'
    elif type == 'raw':
        df = get_df(form_dict, is_pivoted=False)  # raw data
        sheet_name = '原始数据'
    else:
        raise ValueError("unknown export type: %r" % (type,))
    excel_file = IO()
    xlwriter = pd.ExcelWriter(excel_file)
    df.to_excel(xlwriter, sheet_name=sheet_name)
    xlwriter.save()
    xlwriter.close()
    # Rewind the in-memory buffer before reading it back out.
    excel_file.seek(0)
    # Browser MIME type for .xlsx
    response = HttpResponse(
        excel_file.read(),
        content_type=
        'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
    )
    # Timestamped default filename — precise to the second, so effectively
    # unique per export.
    now = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    response['Content-Disposition'] = 'attachment; filename=' + now + '.xlsx'
    return response
def frontend_url(request, url=None, back_link=None, absolute=True):
    """Construct an url for a frontend view.

    Non-datatable GET params of the current request are carried over into
    the resulting query string.

    :param url: target view url; when None only the querystring is returned
    :keyword back_link: type of the back link to be added to the query string
        - here: link to the current request page
        - here_if_none: add link only if there is no `back` parameter
    :keyword absolute: construct absolute url, including host name

    Example::

        namespace = self.ns_map[task.process.flow_class]
        return frontend_url(
            self.request,
            flow_url(namespace, task, 'index', user=request.user),
            back='here')
    """
    params = QueryDict(mutable=True)
    # Copy request GET params, dropping datatable-internal keys and the
    # jQuery cache-buster "_".
    for key, value in six.iterlists(request.GET):
        if not key.startswith('datatable-') and key != '_':
            params.setlist(key, value)
    if back_link == 'here_if_none' and 'back' in params:
        # Do nothing — an explicit back param already exists.
        pass
    elif back_link is not None:
        # Encode the current page (path + surviving params) as the back link.
        if back_link is not None:
            pass
        if params:
            back = "{}?{}".format(quote(request.path),
                                  quote(params.urlencode()))
        else:
            back = "{}".format(quote(request.path))
        params['back'] = back
    if url is not None:
        location = '{}?{}'.format(url, params.urlencode())
        return request.build_absolute_uri(location) if absolute else location
    else:
        return params.urlencode()
def download_xls_favorites(request):
    """Start an async task that builds an XLSX file with the contents of the
    user's favorites; return the Celery task id as JSON."""
    lang = 'ua' if request.LANGUAGE_CODE == 'uk' else 'en'
    search_params = dict(six.iterlists(request.GET))
    task = create_favorites_results_file.delay(
        request.user.pk,
        request.session['favorites_ids'],
        search_params,
        lang)
    return JsonResponse({'task_id': task.id})
def get_raw(request, is_json=False):
    """Extract request parameters as a plain dict.

    JSON bodies are delegated to _get_json; otherwise the GET/POST QueryDict
    is flattened so single-valued keys map to the bare value and multi-valued
    keys keep their list.
    """
    http_method = check_method(request.method, is_json)
    if is_json:
        raw = _get_json(request)
    else:
        # only handle GET and POST because django only have these two attrs
        # on request
        # NOTE can also use QueryDict(request.body) for PUT, DELETE, but that
        # will take more unnecessary complexity
        if http_method == 'GET':
            _raw = request.GET
        elif http_method == 'POST':
            _raw = request.POST
        else:
            # NOTE(review): six.iterlists on a plain dict raises
            # AttributeError (dict has no .lists) — presumably check_method
            # never lets other methods reach here; confirm.
            _raw = {}
        # convert django <QueryDict> to dict, when <QueryDict>
        # is like <QueryDict {'a': ['1'], 'b': ['x', 'y']}>,
        # iteritems will make 'a' return '1', 'b' return 'y',
        # we should convert it to a normal dict so that 'b' keeps
        # ['x', 'y']
        raw = {}
        for k, v in six.iterlists(_raw):
            if not v:
                continue
            if len(v) == 1:
                raw[k] = v[0]
            else:
                raw[k] = v
    return raw
def download_xls_transactions(request):
    """Start an async task that builds an XLSX file with the notification
    (transactions) search results; return the Celery task id as JSON."""
    lang = 'ua' if request.LANGUAGE_CODE == 'uk' else 'en'
    search_params = dict(six.iterlists(request.GET))
    task = create_transactions_search_results_file.delay(search_params, lang)
    return JsonResponse({'task_id': task.id})
def export(request, type):
    """Export the queried data ('pivoted' or 'raw') as an xlsx attachment.

    :param request: Django request; GET params are forwarded to get_df.
    :param type: 'pivoted' for the pivot table, 'raw' for the raw data.
    :returns: HttpResponse carrying a timestamp-named .xlsx attachment.
    :raises ValueError: on an unknown export type (previously an unknown
        type fell through and crashed with UnboundLocalError on ``df``).
    """
    form_dict = dict(six.iterlists(request.GET))
    if type == "pivoted":
        df = get_df(form_dict)  # pivoted data
    elif type == "raw":
        df = get_df(form_dict, is_pivoted=False)  # raw data
    else:
        raise ValueError("unknown export type: %r" % (type,))
    excel_file = IO()
    xlwriter = pd.ExcelWriter(excel_file, engine="xlsxwriter")
    df.to_excel(xlwriter, "data", index=True)
    xlwriter.save()
    xlwriter.close()
    # Rewind the in-memory buffer before reading it back out.
    excel_file.seek(0)
    # Browser MIME type for .xlsx
    response = HttpResponse(
        excel_file.read(),
        content_type=
        "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
    )
    # Timestamped default filename — precise to the second, so effectively
    # unique per export.
    now = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    response["Content-Disposition"] = "attachment; filename=" + now + ".xlsx"
    return response
def get_context_data(self, **kwargs):
    """Add extra template context for the simple-search page and, when the
    formset was submitted, kick off the asynchronous search task."""
    context = super().get_context_data(**kwargs)
    # Current application language
    lang_code = 'ua' if self.request.LANGUAGE_CODE == 'uk' else 'en'
    context['lang_code'] = lang_code
    # Page data
    page_data, created = SimpleSearchPage.objects.get_or_create()
    context['page_description'] = getattr(
        page_data, f"description_{self.request.LANGUAGE_CODE}")
    # Recaptcha
    context['site_key'] = settings.RECAPTCHA_SITE_KEY
    context['RECAPTCHA_ENABLED'] = settings.RECAPTCHA_ENABLED
    # Search parameter types (visible fields, heaviest first)
    context['search_parameter_types'] = list(SimpleSearchField.objects.annotate(
        field_label=F(f"field_label_{lang_code}")
    ).annotate(
        field_type=F('elastic_index_field__field_type')
    ).values(
        'id',
        'field_label',
        'field_type',
    ).filter(
        is_visible=True
    ).order_by(
        '-weight'
    ))
    context['show_search_form'] = True
    context['initial_data'] = {'form-TOTAL_FORMS': 1}
    SimpleSearchFormSet = formset_factory(SimpleSearchForm)
    if self.request.GET.get('form-TOTAL_FORMS'):
        formset = SimpleSearchFormSet(self.request.GET)
        context['initial_data'] = dict(formset.data.lists())
        # Flag that a search is being performed
        context['is_search'] = True
        # Whether to show or hide the search form
        context['show_search_form'] = self.request.session.get(
            'show_search_form', False)
        # Number of results per page (sticky via session)
        self.request.session['show'] = self.request.GET.get(
            'show',
            self.request.session.get('show', 10)
        )
        get_params = dict(six.iterlists(self.request.GET))
        get_params['show'] = [self.request.session['show']]
        # Create the asynchronous Celery search task
        task = perform_simple_search.delay(
            self.request.user.pk,
            get_params
        )
        context['task_id'] = task.id
    return context
def collect_attachments(self, run_id=None):
    """Stage uploaded files and parse JSON fields for a run request.

    Files under "workflow_attachment" are saved (path-sanitized) into a new
    temp dir; JSON-valued fields are decoded; a relative "workflow_url" is
    rewritten to a file:// URL inside the temp dir.

    :returns: (tempdir, body)
    :raises ValueError: when "workflow_url" is missing from the submission
        (previously this surfaced as a bare KeyError).
    """
    tempdir = tempfile.mkdtemp()
    body = {}
    for k, ls in iterlists(connexion.request.files):
        for v in ls:
            if k == "workflow_attachment":
                # Sanitize each path segment; "", "." and ".." are dropped
                # so an attachment cannot escape tempdir.
                sp = v.filename.split("/")
                fn = []
                for p in sp:
                    if p not in ("", ".", ".."):
                        fn.append(secure_filename(p))
                dest = os.path.join(tempdir, *fn)
                if not os.path.isdir(os.path.dirname(dest)):
                    os.makedirs(os.path.dirname(dest))
                self.log_for_run(
                    run_id,
                    "Staging attachment '%s' to '%s'" % (v.filename, dest))
                v.save(dest)
                body[k] = "file://%s" % tempdir  # Reference to temp working dir.
            elif k in ("workflow_params", "tags",
                       "workflow_engine_parameters"):
                content = v.read()
                body[k] = json.loads(content)
            else:
                # NOTE(review): stored as bytes here but decoded in the
                # sibling collect_attachments variant — confirm callers.
                body[k] = v.read()
    if "workflow_url" not in body:
        # Fail with an explicit message instead of a bare KeyError.
        raise ValueError("Missing 'workflow_url' in submission")
    if ":" not in body["workflow_url"]:
        # A relative URL refers to one of the staged attachments.
        body["workflow_url"] = "file://%s" % os.path.join(
            tempdir, secure_filename(body["workflow_url"]))
    self.log_for_run(run_id,
                     "Using workflow_url '%s'" % body.get("workflow_url"))
    return tempdir, body
def download_xls_advanced(request):
    """Start an async task that builds an XLSX file with the advanced-search
    results; return the Celery task id as JSON."""
    lang = 'ua' if request.LANGUAGE_CODE == 'uk' else 'en'
    search_params = dict(six.iterlists(request.GET))
    task = create_advanced_search_results_file.delay(
        request.user.pk,
        search_params,
        lang)
    return JsonResponse({'task_id': task.id})
def get_request_params(self, request):
    """Return the request's query params with disallowed ones stripped.

    NOTE(review): the two branches return different types — an
    iterator/list of (key, values) pairs from six.iterlists when params
    exist, but a plain dict ({}) when none do. Callers must cope with
    both; confirm this is intended.
    """
    if request.query_params:
        qp = request.query_params.copy()
        # Drop proxy-level blacklisted parameters.
        for param in self.proxy_settings.DISALLOWED_PARAMS:
            if param in qp:
                del qp[param]
        return six.iterlists(qp)
    return {}
def get_context_data(self, **kwargs):
    """Add extra template context for the advanced-search page and, when the
    formset was submitted, kick off the asynchronous search task."""
    context = super().get_context_data(**kwargs)
    # Current application language
    context['lang_code'] = 'ua' if self.request.LANGUAGE_CODE == 'uk' else 'en'
    # Page data
    page_data, created = AdvancedSearchPage.objects.get_or_create()
    context['page_description'] = getattr(
        page_data, f"description_{self.request.LANGUAGE_CODE}")
    # IP object types
    context['obj_types'] = list(
        ObjType.objects.order_by('order').annotate(
            value=F(f"obj_type_{context['lang_code']}")
        ).values('id', 'value')
    )
    # INID codes together with their schedules
    context['ipc_codes'] = get_ipc_codes_with_schedules(context['lang_code'])
    # Recaptcha
    context['site_key'] = settings.RECAPTCHA_SITE_KEY
    context['show_search_form'] = True
    context['initial_data'] = {'form-TOTAL_FORMS': 1}
    AdvancedSearchFormSet = formset_factory(AdvancedSearchForm)
    if self.request.GET.get('form-TOTAL_FORMS'):
        formset = AdvancedSearchFormSet(self.request.GET)
        # Initial data for the form
        context['initial_data'] = dict(formset.data.lists())
        # Flag that a search is being performed
        context['is_search'] = True
        # Whether to show or hide the search form
        context['show_search_form'] = self.request.session.get(
            'show_search_form', False)
        # Number of results per page (sticky via session)
        self.request.session['show'] = self.request.GET.get(
            'show',
            self.request.session.get('show', 10)
        )
        get_params = dict(six.iterlists(self.request.GET))
        get_params['show'] = [self.request.session['show']]
        # ElasticSearch search: create the asynchronous Celery task
        task = perform_advanced_search.delay(
            self.request.user.pk,
            get_params
        )
        context['task_id'] = task.id
    context['RECAPTCHA_ENABLED'] = settings.RECAPTCHA_ENABLED
    return context
def update(self, request, *args, **kwargs):
    """Replace the object's reasons with the first value of every
    request-data key, taken in (lexicographic) key order."""
    obj = self.get_object()
    grouped = dict(six.iterlists(request.data))
    obj.reasons = [values[0] for _key, values in sorted(six.iteritems(grouped))]
    obj.save()
    return Response({'result': 'success'})
def shared_pagination_GET_params(self):
    """
    Override the params and applies all the params of the originating
    view to the GET so as to get sorting working correctly with the
    context of the GET params
    """
    params = super(GetParamsMixin, self).shared_pagination_GET_params
    params.extend({'name': key, 'value': values}
                  for key, values in six.iterlists(self.request.GET))
    return params
def this_is_okay():
    # NOTE(review): looks like a lint-check fixture enumerating the
    # six/future iterator-helper calls the checker should accept —
    # deliberately left byte-identical.
    d = {}
    iterkeys(d)
    six.iterkeys(d)
    six.itervalues(d)
    six.iteritems(d)
    six.iterlists(d)
    six.viewkeys(d)
    six.viewvalues(d)
    six.viewlists(d)
    itervalues(d)
    future.utils.iterkeys(d)
    future.utils.itervalues(d)
    future.utils.iteritems(d)
    future.utils.iterlists(d)
    future.utils.viewkeys(d)
    future.utils.viewvalues(d)
    future.utils.viewlists(d)
    six.next(d)
    builtins.next(d)
def querydict_to_multidict(query_dict, wrap=None):
    """
    Returns a new `webob.MultiDict` from a `django.http.QueryDict`.

    If `wrap` is provided, it's used to wrap the values.
    """
    if wrap is None:
        wrap = lambda val: val
    # Flatten the (key -> [values]) mapping into (key, wrapped_value) pairs.
    pairs = chain.from_iterable(
        ((key, wrap(value)) for value in values)
        for key, values in six.iterlists(query_dict)
    )
    return MultiDict(pairs)
def get_context_data(self, **kwargs):
    """Add extra template context for the transactions-search page and, when
    a complete query is present, start the asynchronous search task."""
    context = super().get_context_data(**kwargs)
    # Current application language
    context['lang_code'] = 'ua' if self.request.LANGUAGE_CODE == 'uk' else 'en'
    # Recaptcha
    context['RECAPTCHA_ENABLED'] = settings.RECAPTCHA_ENABLED
    context['site_key'] = settings.RECAPTCHA_SITE_KEY
    context['initial_data'] = dict()
    context['is_search'] = False
    context['show_search_form'] = True
    # A search runs only when all three query fields are present.
    if self.request.GET.get('obj_type') and self.request.GET.get('transaction_type') \
            and self.request.GET.get('date'):
        context['is_search'] = True
        # Whether to show or hide the search form
        context['show_search_form'] = self.request.session.get(
            'show_search_form', False)
        # Number of results per page (sticky via session)
        self.request.session['show'] = self.request.GET.get(
            'show',
            self.request.session.get('show', 10)
        )
        get_params = dict(six.iterlists(self.request.GET))
        get_params['show'] = [self.request.session['show']]
        context['initial_data'] = dict(six.iterlists(self.request.GET))
        # Search: create the asynchronous Celery task
        task = perform_transactions_search.delay(
            get_params
        )
        context['task_id'] = task.id
    return context
def _get_data(self):
    """Load data through the configured adapter and return it as GeoJSON.

    The adapter name selects a ``_get_data_<adapter>`` loader method on
    self; request GET params are flattened (single-element lists collapse
    to the bare value) and passed to the loader as its config.

    :raises RuntimeError: when no loader exists for the adapter name.
    """
    adapter = self.data_source['adapter']
    geo_col = self.data_source.get('geo_column', 'geo')
    try:
        loader = getattr(self, '_get_data_%s' % adapter)
    except AttributeError:
        raise RuntimeError('unknown adapter [%s]' % adapter)
    config = dict(six.iterlists(self.request.GET))
    # Collapse single-element lists; mutating values while iterating is
    # safe here since no keys are added or removed.
    for k, v in six.iteritems(config):
        if len(v) == 1:
            config[k] = v[0]
    data = loader(self.data_source, config)
    return self._to_geojson(data, geo_col)
def query(request):
    """Build pivot/raw HTML tables and charts for the queried data and
    return them as a JSON payload for the frontend."""
    # six bridges python2 dictionary.iterlists() / python3 dictionary.lists()
    form_dict = dict(six.iterlists(request.GET))
    print("前端表单转换为字典:")
    print(form_dict)
    pivoted = get_df(form_dict)  # pivoted data
    df = get_df(form_dict, is_pivoted=False)  # raw data
    # KPI
    # kpi = get_kpi(pivoted)
    # table = ptable(pivoted)
    # Pivot table
    table = pivoted.to_html(
        formatters=build_formatters_by_col(pivoted),  # per-column number formats
        classes='ui selectable striped nowrap celled table',  # Semantic UI theme classes
        table_id='ptable'  # table element id
    )
    # Raw-data table
    inittable = df.to_html(
        # formatters=build_formatters_by_col(df),  # per-column number formats
        classes='ui selectable striped nowrap celled table ',  # Semantic UI theme classes
        table_id='initdata_table'  # table element id
    )
    # Charts built from describe/value_counts.
    # NOTE(review): index, column, aggfunc and value are not defined in this
    # function — presumably module-level globals set elsewhere; confirm.
    info_chart = json.loads(
        prepare_chart(df, 'get_info_chart', index, column, aggfunc, value))
    # Raw-data chart
    origin_data_chart = json.loads(
        prepare_chart(df, 'creat_origindata_chart', index, column, aggfunc,
                      value))
    # 3D pivot chart
    pivot_chart = json.loads(
        prepare_chart(pivoted, 'get_pivot_chart', index, column, aggfunc,
                      value))
    context = {
        'ptable': table,
        "initdata_table": inittable,
        'info_chart': info_chart,
        'pivot_chart': pivot_chart,
        'origin_data_chart': origin_data_chart
    }
    # The response must be JSON.
    return HttpResponse(
        json.dumps(context, ensure_ascii=False),
        content_type="application/json charset=utf-8")
def get_context_data(self, **kwargs):
    """Add the user's favorites ids and recaptcha settings to the template
    context; when favorites exist, start the async favorites-search task."""
    context = super().get_context_data(**kwargs)
    favorites = self.request.session.get('favorites_ids')
    context['favorites_ids'] = favorites
    # Recaptcha
    context['site_key'] = settings.RECAPTCHA_SITE_KEY
    context['RECAPTCHA_ENABLED'] = settings.RECAPTCHA_ENABLED
    if favorites:
        # Create the asynchronous Celery task.
        task = perform_favorites_search.delay(
            favorites,
            self.request.user.pk,
            dict(six.iterlists(self.request.GET)))
        context['task_id'] = task.id
    return context
def querystring(data, exclude="page,all"):
    """
    Returns the current querystring, excluding specified GET parameters::

        {% request.GET|querystring:"page,all" %}
    """
    excluded = set(exclude.split(","))
    pairs = []
    for key, values in six.iterlists(data):
        if key in excluded:
            continue
        # One (key, value) pair per repeated value.
        pairs.extend((key, value) for value in values)
    return urlencode(sorted(pairs))
def get_raw(request, is_json=False):
    """Extract request parameters as a plain dict.

    JSON bodies are delegated to _get_json; otherwise the GET/POST QueryDict
    is flattened so single-valued keys map to the bare value and multi-valued
    keys keep their list.

    Fix: the old ``getattr(request, http_method)`` raised AttributeError for
    any method other than GET/POST (django only exposes those two
    QueryDicts), and the subsequent six.iterlists on a plain dict would also
    crash; other methods now yield an empty dict, matching the sibling
    get_raw implementation.
    """
    http_method = check_method(request.method, is_json)
    if is_json:
        return _get_json(request)
    if http_method == 'GET':
        _raw = request.GET
    elif http_method == 'POST':
        _raw = request.POST
    else:
        return {}
    # convert django <QueryDict> to dict, when <QueryDict>
    # is like <QueryDict {'a': ['1'], 'b': ['x', 'y']}>,
    # iteritems will make 'a' return '1', 'b' return 'y',
    # we should convert it to a normal dict so that 'b' keeps
    # ['x', 'y']
    raw = {}
    for k, v in six.iterlists(_raw):
        if not v:
            continue
        if len(v) == 1:
            raw[k] = v[0]
        else:
            raw[k] = v
    return raw
def parse_args_for_es(request, prefix=None):
    """
    Parses a request's query string for url parameters. It specifically
    parses the facet url parameter so that each term is counted as a
    separate facet. e.g. 'facets=region author category' ->
    facets = ['region', 'author', 'category']

    :returns: (params, facets) — params maps each (optionally
        prefix-stripped) name to its unquoted values; facets is the
        whitespace-split facet list.
    """
    def strip_array(name):
        # Drop a trailing "[]" (jQuery/PHP-style array param suffix).
        # Renamed from "str", which shadowed the builtin.
        return name[:-2] if name.endswith('[]') else name

    params, facets = {}, []
    for param, vals in six.iterlists(request.GET):
        if param == 'facets':
            facets = vals[0].split()
            continue
        if prefix:
            # Only keep prefixed params, with the prefix stripped.
            if param.startswith(prefix):
                params[strip_array(param[len(prefix):])] = [
                    unquote(a) for a in vals]
        else:
            params[strip_array(param)] = [unquote(a) for a in vals]
    return params, facets
def collect_attachments(self):
    """Stage uploaded files and parse JSON fields for a run request.

    Files under "workflow_attachment" are saved (name-sanitized) into a new
    temp dir; JSON-valued fields are decoded; a relative "workflow_url" is
    rewritten to a file:// URL inside the temp dir.

    :returns: (tempdir, body)
    :raises ValueError: when "workflow_url" is missing from the submission
        (previously this surfaced as a bare KeyError).
    """
    tempdir = tempfile.mkdtemp()
    body = {}
    for k, ls in iterlists(connexion.request.files):
        for v in ls:
            if k == "workflow_attachment":
                # secure_filename flattens the name so uploads cannot
                # escape tempdir.
                filename = secure_filename(v.filename)
                v.save(os.path.join(tempdir, filename))
                body[k] = "file://%s" % tempdir  # Reference to temp working dir.
            elif k in ("workflow_params", "tags",
                       "workflow_engine_parameters"):
                body[k] = json.loads(v.read())
            else:
                body[k] = v.read()
    if "workflow_url" not in body:
        # Fail with an explicit message instead of a bare KeyError.
        raise ValueError("Missing 'workflow_url' in submission")
    if ":" not in body["workflow_url"]:
        # A relative URL refers to one of the staged attachments.
        body["workflow_url"] = "file://%s" % os.path.join(
            tempdir, secure_filename(body["workflow_url"]))
    return tempdir, body
def query(request):
    """Build performance/price HTML tables and trend charts for the queried
    data and return them as a JSON payload for the frontend."""
    print(request.GET)
    form_dict = dict(six.iterlists(request.GET))
    # Data label = analysis period + unit
    label = D_TRANS[form_dict['PERIOD_select'][0]] + D_TRANS[
        form_dict['UNIT_select'][0]]
    pivoted = get_df(form_dict)
    # Latest cross-section performance table
    table = get_ptable(pivoted, label)
    ptable = table.to_html(
        formatters=build_formatters_by_col(table),  # per-column number formats
        classes='ui selectable celled table',  # Semantic UI theme classes
        table_id='ptable'  # table element id
    )
    # Trend performance table
    table = get_ptable_trend(pivoted, label)
    ptable_trend = table.to_html(
        formatters=build_formatters_by_col(table),
        classes='ui selectable celled table',
        table_id='ptable_trend'
    )
    # Price analysis tables
    table = get_price(form_dict, 'Volume')
    price_table_box = table.to_html(
        formatters=build_formatters_by_col(table),
        classes='ui selectable celled table',
        table_id='price_table_box'
    )
    table = get_price(form_dict, 'Volume (Counting Unit)')
    price_table_cnt = table.to_html(
        formatters=build_formatters_by_col(table),
        classes='ui selectable celled table',
        table_id='price_table_cnt'
    )
    # Interactive Pyecharts charts
    bar_total_trend = json.loads(
        prepare_chart(pivoted, 'bar_total_trend', form_dict))
    stackarea_abs_trend = json.loads(
        prepare_chart(pivoted, 'stackarea_abs_trend', form_dict))
    stackarea_share_trend = json.loads(
        prepare_chart(pivoted, 'stackarea_share_trend', form_dict))
    line_gr_trend = json.loads(
        prepare_chart(pivoted, 'line_gr_trend', form_dict))
    context = {
        'label': label,
        'market_size': kpi(pivoted)[0],
        'market_gr': kpi(pivoted)[1],
        'market_cagr': kpi(pivoted)[2],
        'ptable': ptable,
        'ptable_trend': ptable_trend,
        'price_table_box': price_table_box,
        'price_table_cnt': price_table_cnt,
        'bar_total_trend': bar_total_trend,
        'stackarea_abs_trend': stackarea_abs_trend,
        'stackarea_share_trend': stackarea_share_trend,
        'line_gr_trend': line_gr_trend
    }
    # Optionally include Matplotlib static charts depending on query options
    if form_dict['toggle_bubble_perf'][0] == 'true':
        bubble_performance = prepare_chart(pivoted, 'bubble_performance',
                                           form_dict)
        context['bubble_performance'] = bubble_performance
    # The response must be JSON.
    return HttpResponse(
        json.dumps(context, ensure_ascii=False),
        content_type="application/json charset=utf-8")
def send_email_report(self, recipient_emails, domain, report_slug, report_type,
                      request_data, once, cleaned_data):
    """
    Function invokes send_HTML_email to email the html text report.
    If the report is too large to fit into email then a download link is
    sent via email to download report

    :Parameter recipient_list: list of recipient to whom email is to be sent
    :Parameter domain: domain name
    :Parameter report_slug: report slug
    :Parameter report_type: type of the report
    :Parameter request_data: Dict containing request data
    :Parameter once: boolean argument specifying whether the report is
        once off report or scheduled report
    :Parameter cleaned_data: Dict containing cleaned data from the
        submitted form
    """
    from corehq.apps.reports.views import _render_report_configs, render_full_report_notification

    user_id = request_data['couch_user']
    couch_user = CouchUser.get_by_user_id(user_id)
    # Rebuild a minimal GET request so the report renders as it would on-site.
    mock_request = HttpRequest()
    mock_request.method = 'GET'
    mock_request.GET = request_data['GET']

    config = ReportConfig()
    # see ReportConfig.query_string()
    object.__setattr__(config, '_id', 'dummy')
    config.name = _("Emailed report")
    config.report_type = report_type
    config.report_slug = report_slug
    config.owner_id = user_id
    config.domain = domain
    config.start_date = request_data['datespan'].startdate.date()
    if request_data['datespan'].enddate:
        config.date_range = 'range'
        config.end_date = request_data['datespan'].enddate.date()
    else:
        config.date_range = 'since'

    # Report filters = original GET params minus scheduling/email fields.
    GET = dict(six.iterlists(request_data['GET']))
    exclude = ['startdate', 'enddate', 'subject', 'send_to_owner', 'notes',
               'recipient_emails']
    filters = {}
    for field in GET:
        if field not in exclude:
            filters[field] = GET.get(field)
    config.filters = filters

    subject = cleaned_data['subject'] or _("Email report from CommCare HQ")
    try:
        content = _render_report_configs(
            mock_request, [config], domain, user_id, couch_user, True,
            lang=couch_user.language, notes=cleaned_data['notes'], once=once
        )[0]
        body = render_full_report_notification(None, content).content
        for recipient in recipient_emails:
            send_HTML_email(
                subject, recipient, body,
                email_from=settings.DEFAULT_FROM_EMAIL,
                smtp_exception_skip_list=LARGE_FILE_SIZE_ERROR_CODES)
    except Exception as er:
        notify_exception(
            None,
            message="Encountered error while generating report or sending email",
            details={
                'subject': subject,
                'recipients': str(recipient_emails),
                'error': er,
            }
        )
        if getattr(er, 'smtp_code', None) in LARGE_FILE_SIZE_ERROR_CODES or type(er) == ESError:
            # If the email doesn't work because it is too large to fit in the
            # HTML body, send it as an excel attachment.
            report_state = {
                'request': request_data,
                'request_params': json_request(request_data['GET']),
                'domain': domain,
                'context': {},
            }
            export_all_rows_task(config.report, report_state,
                                 recipient_list=recipient_emails)
        else:
            self.retry(exc=er)
def query(request):
    """Build sales/target performance tables and charts and return them as a
    JSON payload for the frontend."""
    form_dict = dict(six.iterlists(request.GET))
    df = get_df(form_dict)
    # KPI dict
    kpi = get_kpi(df["销售"], df["带指标销售"], df["指标"])
    # Whether to show only the first 200 results — rendering too many rows
    # hurts frontend performance.
    show_limit_results = form_dict["toggle_limit_show"][0]
    # Aggregate performance tables
    ptable = format_table(
        get_ptable(df_sales=df["销售"], df_target=df["指标"],
                   show_limit_results=show_limit_results),
        "ptable",
    )
    ptable_comm = format_table(
        get_ptable_comm(
            df_sales=df["销售"],
            df_sales_comm=df["社区销售"],
            df_target_comm=df["社区指标"],
            show_limit_results=show_limit_results,
        ),
        "ptable_comm",
    )
    # Monthly performance trend tables
    ptable_monthly = get_ptable_monthly(df_sales=df["销售"],
                                        show_limit_results=show_limit_results)
    ptable_comm_monthly = {}
    temp = get_ptable_monthly(df_sales=df["社区销售"],
                              show_limit_results=show_limit_results)
    # Re-key (and re-label) the community tables off the generic ones.
    for k, v in temp.items():
        ptable_comm_monthly[k.replace("ptable_monthly",
                                      "ptable_comm_monthly")] = v.replace(
            "ptable_monthly", "ptable_comm_monthly")
    # Monthly community-sales share trend
    ptable_comm_ratio_monthly = format_table(
        get_ratio_monthly(
            df1=df["社区销售"],
            df2=df["销售"],
            table_name="社区销售占比趋势",
            show_limit_results=show_limit_results,
        ),
        "ptable_comm_ratio_monthly",
    )
    # Monthly productivity per opened hospital
    ptable_hppdt_monthly = format_table(
        get_ratio_monthly(
            df1=df["销售"],
            df2=df["开户医院数"],
            table_name="开户医院单产趋势",
            show_limit_results=show_limit_results,
        ),
        "ptable_hppdt_monthly",
    )
    # Monthly productivity per sales rep
    ptable_rsppdt_monthly = format_table(
        get_ratio_monthly(
            df1=df["销售"],
            df2=df["代表数"],
            table_name="代表单产趋势",
            show_limit_results=show_limit_results,
        ),
        "ptable_rsppdt_monthly",
    )
    # Interactive Pyecharts charts
    bar_total_monthly_trend = prepare_chart(df["销售"], df["指标"],
                                            "bar_total_monthly_trend",
                                            form_dict)
    scatter_sales_abs_diff = prepare_chart(df["销售"], df["指标"],
                                           "scatter_sales_abs_diff",
                                           form_dict)
    scatter_sales_comm_abs_diff = prepare_chart(df["社区销售"], df["社区指标"],
                                                "scatter_sales_abs_diff",
                                                form_dict)
    # pie_product = json.loads(prepare_chart(df_sales, df_target, "pie_product", form_dict))
    context = {
        "show_limit_results": show_limit_results,
        "ptable": ptable,
        "ptable_comm": ptable_comm,
        "ptable_comm_ratio_monthly": ptable_comm_ratio_monthly,
        "ptable_hppdt_monthly": ptable_hppdt_monthly,
        "ptable_rsppdt_monthly": ptable_rsppdt_monthly,
        "bar_total_monthly_trend": bar_total_monthly_trend,
        "scatter_sales_abs_diff": scatter_sales_abs_diff,
        "scatter_sales_comm_abs_diff": scatter_sales_comm_abs_diff,
        # "pie_product": pie_product,
    }
    # Merge in the KPI values and the monthly-table dicts.
    context = dict(context, **kpi)
    context = dict(context, **ptable_monthly)
    context = dict(context, **ptable_comm_monthly)
    # The response must be JSON.
    return HttpResponse(
        json.dumps(context, ensure_ascii=False),
        content_type="application/json charset=utf-8",
    )
def query(request):
    """Build performance/price HTML tables and trend charts for the queried
    data and return them as a JSON payload for the frontend."""
    print(request.GET)
    form_dict = dict(six.iterlists(request.GET))
    # Data label = analysis period + unit
    label = D_TRANS[form_dict["PERIOD_select"][0]] + D_TRANS[
        form_dict["UNIT_select"][0]]
    pivoted = get_df(form_dict)
    # KPI
    kpi = get_kpi(pivoted)
    # Latest cross-section performance table
    table = get_ptable(pivoted, label)
    ptable = table.to_html(
        formatters=build_formatters_by_col(table),  # per-column number formats
        classes="ui selectable celled table",  # Semantic UI theme classes
        table_id="ptable",  # table element id
    )
    # Trend performance table
    table = get_ptable_trend(pivoted, label)
    ptable_trend = table.to_html(
        formatters=build_formatters_by_col(table),
        classes="ui selectable celled table",
        table_id="ptable_trend",
    )
    # Price analysis tables
    table = get_price(form_dict, "Volume")
    price_table_box = table.to_html(
        formatters=build_formatters_by_col(table),
        classes="ui selectable celled table",
        table_id="price_table_box",
    )
    table = get_price(form_dict, "Volume (Counting Unit)")
    price_table_cnt = table.to_html(
        formatters=build_formatters_by_col(table),
        classes="ui selectable celled table",
        table_id="price_table_cnt",
    )
    # Interactive Pyecharts charts
    bar_total_trend = json.loads(
        prepare_chart(pivoted, "bar_total_trend", form_dict))
    stackarea_abs_trend = json.loads(
        prepare_chart(pivoted, "stackarea_abs_trend", form_dict))
    stackarea_share_trend = json.loads(
        prepare_chart(pivoted, "stackarea_share_trend", form_dict))
    line_gr_trend = json.loads(
        prepare_chart(pivoted, "line_gr_trend", form_dict))
    context = {
        "label": label,
        "market_size": kpi["market_size"],
        "market_gr": kpi["market_gr"],
        "market_cagr": kpi["market_cagr"],
        "ptable": ptable,
        "ptable_trend": ptable_trend,
        "price_table_box": price_table_box,
        "price_table_cnt": price_table_cnt,
        "bar_total_trend": bar_total_trend,
        "stackarea_abs_trend": stackarea_abs_trend,
        "stackarea_share_trend": stackarea_share_trend,
        "line_gr_trend": line_gr_trend,
    }
    # Optionally include Matplotlib static charts depending on query options
    if form_dict["toggle_treemap_share"][0] == "true":
        treemap_share = prepare_chart(pivoted, "treemap_share", form_dict)
        context["treemap_share"] = treemap_share
    if form_dict["toggle_bubble_perf"][0] == "true":
        bubble_performance = prepare_chart(pivoted, "bubble_performance",
                                           form_dict)
        context["bubble_performance"] = bubble_performance
    # The response must be JSON.
    return HttpResponse(
        json.dumps(context, ensure_ascii=False),
        content_type="application/json charset=utf-8",
    )
def send_email_report(self, recipient_emails, domain, report_slug, report_type,
                      request_data, once, cleaned_data):
    """
    Function invokes send_HTML_email to email the html text report.
    If the report is too large to fit into email then a download link is
    sent via email to download report

    :Parameter recipient_list: list of recipient to whom email is to be sent
    :Parameter domain: domain name
    :Parameter report_slug: report slug
    :Parameter report_type: type of the report
    :Parameter request_data: Dict containing request data
    :Parameter once: boolean argument specifying whether the report is
        once off report or scheduled report
    :Parameter cleaned_data: Dict containing cleaned data from the
        submitted form
    """
    from corehq.apps.reports.views import _render_report_configs, render_full_report_notification

    user_id = request_data['couch_user']
    couch_user = CouchUser.get_by_user_id(user_id)
    # Rebuild a minimal GET request so the report renders as it would on-site.
    mock_request = HttpRequest()
    mock_request.method = 'GET'
    mock_request.GET = request_data['GET']

    config = ReportConfig()
    # see ReportConfig.query_string()
    object.__setattr__(config, '_id', 'dummy')
    config.name = _("Emailed report")
    config.report_type = report_type
    config.report_slug = report_slug
    config.owner_id = user_id
    config.domain = domain
    config.start_date = request_data['datespan'].startdate.date()
    if request_data['datespan'].enddate:
        config.date_range = 'range'
        config.end_date = request_data['datespan'].enddate.date()
    else:
        config.date_range = 'since'

    # Report filters = original GET params minus scheduling/email fields.
    GET = dict(six.iterlists(request_data['GET']))
    exclude = [
        'startdate', 'enddate', 'subject', 'send_to_owner', 'notes',
        'recipient_emails'
    ]
    filters = {}
    for field in GET:
        if field not in exclude:
            filters[field] = GET.get(field)
    config.filters = filters

    subject = cleaned_data['subject'] or _("Email report from CommCare HQ")
    content = _render_report_configs(mock_request, [config], domain, user_id,
                                     couch_user, True,
                                     lang=couch_user.language,
                                     notes=cleaned_data['notes'],
                                     once=once)[0]
    body = render_full_report_notification(None, content).content
    try:
        for recipient in recipient_emails:
            send_HTML_email(
                subject, recipient, body,
                email_from=settings.DEFAULT_FROM_EMAIL,
                smtp_exception_skip_list=LARGE_FILE_SIZE_ERROR_CODES)
    except Exception as er:
        if getattr(er, 'smtp_code', None) in LARGE_FILE_SIZE_ERROR_CODES:
            # If the smtp server rejects the email because of its large size.
            # Then sends the report download link in the email.
            report_state = {
                'request': request_data,
                'request_params': json_request(request_data['GET']),
                'domain': domain,
                'context': {},
            }
            export_all_rows_task(config.report, report_state,
                                 recipient_list=recipient_emails)
        else:
            # Any other failure: let celery retry the task.
            self.retry(exc=er)