Пример #1
1
from docxtpl import DocxTemplate
import jinja2

# Minimal docxtpl example: load a Word template, substitute the
# {{ Municipality }} placeholder from the context dict, and write the
# rendered document to a new file.
doc = DocxTemplate("TemplateLGULiPADAccountRegistration.docx")
context = { 'Municipality' : "Munishipariti" }  # template variable -> value
doc.render(context)
doc.save("generated_doc.docx")
Пример #2
0
def run(dictionnaire, template, dst):
    """Render a '.docx' template from a content dictionary.

    :param dictionnaire: content dict; its 'Fiches' entry is a list of
        dicts whose values get wrapped in RichText before rendering.
    :param template: path of the '.docx' template to fill in.
    :param dst: destination path where the final file is saved.

    (Python 2 code: uses ``iteritems``/``basestring``.)
    """
    tpl = DocxTemplate(template)

    for fiche in dictionnaire['Fiches']:
        for key1, value1 in fiche.iteritems():
            if (isinstance(value1, basestring) and
                    ('Exigences' in key1 or 'Pre-requis' in key1)):
                # Drop tabs and surrounding newlines before wrapping.
                # BUG FIX: the original trimmed trailing newlines with
                # value1[:-2], deleting one extra character for every
                # trailing '\n'; strip('\n') removes exactly the newlines.
                fiche[key1] = RichText(value1.replace('\t', '').strip('\n'))

            elif isinstance(value1, list):
                # Nested list entries: wrap every value in RichText.
                for elem in value1:
                    for key2, value2 in elem.iteritems():
                        elem[key2] = RichText(value2)

    tpl.render(dictionnaire)
    tpl.save(dst)
Пример #3
0
def download(file_type):
    '''Download endpoint.

    Serves either a previously uploaded "scheme" file as-is, or a docx
    document rendered on the fly from a per-type template.  ``file_type``
    must be 'form' or 'scheme'; anything else is a 404.  Reads 'id' and
    'type' from the POSTed form data.
    '''

    if file_type not in ['form','scheme']:abort(404)
    id = request.form.get('id')
    type = request.form.get('type')
    data = query_data(type,id)
    # Serve the stored scheme file directly.
    if file_type == 'scheme':
        if data.filename == 'Nothing':abort(404)
        content = send_file(path.join(Upload_path,data.rand_filename))
        filename = quote(data.filename)
    #if data.applicant!=current_user.name :abort(404)
    else :
        # Build the jinja2 context and render the docx template.
        context=make_context(data,type)
        for key,value in context.items() :
            context[key] = RichText(value)
        doc = DocxTemplate(path.join(Docx_path,type+'.docx'))
        doc.render(context)
        # Render to a per-user temp file, read it back, then delete it.
        temp_file = path.join(Upload_path,str(current_user.id) +'result.docx')
        doc.save(temp_file)
        with open(temp_file,'rb') as f:
            content = f.read()
        if path.exists(temp_file):
            remove(temp_file)
        filename = quote(data.association+'-'+types[type][1]+'.docx')

    # RFC 5987 encoded filename so non-ASCII names survive the header.
    response = make_response(content)
    response.headers['Content-Disposition'] = \
    "attachment;filename*=UTF-8''" + filename
    response.headers['Content-Type'] = 'application/octet-stream'
    return response
Пример #4
0
def gen_a_doc(doc_name, preparation_module=None):
    """Render the docx template named *doc_name* and return its URL.

    :param doc_name: template base name; ``report_template`` refers to
        ``report_template.docx`` under ``DOC_TEMPLATES_DIR``.
    :param preparation_module: importable "data-cooker" module exposing a
        ``context()`` function that returns the jinja2 rendering context.
        Defaults to *doc_name* itself.
    :return: URL (under ``TMP_URL``) of the timestamped generated file,
        which is saved in ``TMP_DEST``.
    """
    if preparation_module is None:
        preparation_module = doc_name  # data-cooker named after the template

    templates_dir = getattr(settings, "DOC_TEMPLATES_DIR", None)
    context_gen_dir = getattr(settings, "DOC_CONTEXT_GEN_DIR", None)
    project_root = getattr(settings, "PROJECT_ROOT", None)
    tmp_dest = getattr(settings, "TMP_DEST", None)
    tmp_url = getattr(settings, "TMP_URL", None)

    template_path = os.path.join(project_root, os.path.join(templates_dir, doc_name + ".docx"))
    document = DocxTemplate(template_path)
    print(os.path.join(project_root, os.path.join(context_gen_dir, preparation_module)))

    cooker = import_module(preparation_module)
    document.render(getattr(cooker, "context")())

    # Timestamp the output name so repeated renders don't collide.
    stamp = datetime.datetime.fromtimestamp(time.time()).strftime("%Y-%m-%d_%H:%M:%S")
    document.save(os.path.join(tmp_dest, doc_name + stamp + ".docx"))
    return tmp_url + doc_name + stamp + ".docx"
Пример #5
0
    def test_get_env(self):
        ''' Test the get_env helper by rendering the sell order template. '''
        doc = DocxTemplate(misc.file_open('sell/template/sell.order.docx').name)
        data = self.env['sell.order'].search([('name', '=', 'SO00001')])

        # 'tpl' must be present in the context so template filters can
        # reference the DocxTemplate instance (e.g. for inline images).
        ctx={'obj':data,'tpl':doc}

        jinja_env = report_helper.get_env()
        doc.render(ctx,jinja_env)
Пример #6
0
 def import_excel(self, **post):
     """Import IP-phone rows from an uploaded .xls workbook, create one
     abc_ipt.ip_phone_importing record per data row, and render the rows
     two-per-page into test_word.docx.

     Returns the generated file's path, or an error message string when
     no file was posted.  (Python 2 code: note the bare print statement.)
     """
     fp = StringIO()  # NOTE(review): created but never used afterwards
     datas = post['excel']
     if datas:
         if re.match(r".*.xls.?",datas.filename):
             tmp = datas.stream.read()
             if tmp:
                 ExcelFile = xlrd.open_workbook(filename=datas.filename,file_contents=tmp)
     else:
         return '请选择要导入的文件'
     # NOTE(review): ExcelFile is unbound (NameError) when the posted
     # file is not an .xls or its stream is empty — confirm intent.
     if ExcelFile:
         word_content = []
         path = os.path.abspath(os.path.dirname(sys.argv[0]))
         tpl = DocxTemplate(path.replace('\\', '/') + '/myaddons/abc_ipt/test_word.docx')
         sheet = ExcelFile.sheet_by_index(0)
         word_page = []
         row_list = []
         for i in range(1, sheet.nrows):
             # Pad short rows out to 7 cells.
             # NOTE(review): xlrd.sheet.Cell normally takes (ctype, value);
             # a single "" argument looks wrong — confirm against xlrd docs.
             for k in range(len(sheet.row(i)),7):
                 sheet.row(i).append(xlrd.sheet.Cell(""))
             row_list = sheet.row(i)
             print type(row_list[1])
             create_dict = {
                 'address': row_list[1].value or '',
                 'department': row_list[2].value  or '',
                 'office': row_list[3].value or '',
                 'name': row_list[4].value or '',
                 'numbers': str(int(row_list[5].value)) if row_list[5].value else '',
                 'mac':str((row_list[6].value)) or ''
             }
             http.request.env['abc_ipt.ip_phone_importing'].sudo().create(create_dict)
             word_page.append(create_dict)
             # Flush a page to the document once it holds two records.
             if len(word_page) == 2:
                 word_content.append(word_page)
                 word_page = []
                 continue
             # Last data row: pad the final page with an empty record.
             # NOTE(review): this filler uses key 'number' while real rows
             # use 'numbers' — likely a typo; verify against the template.
             if i == sheet.nrows - 1:
                 word_page.append({
                     'address': '',
                     'department': '',
                     'office': '',
                     'name': '',
                     'number': '',
                     'mac':''
                 })
                 word_content.append(word_page)
         content = {}
         content['info'] = word_content
         tpl.render(content)
         filename = path.replace('\\', '/') + '/myaddons/abc_ipt/tag.docx'
         tpl.save(filename)
         return filename
Пример #7
0
def test():
    """
    Demo of how to use this module.  Run this file directly, but supply
    your own docx template and image under tpls/.
    """
    tpl = DocxTemplate("tpls/test_tpl.docx")
    # Read the image bytes and base64-encode them (Python 2 codec API).
    # FIX: the original leaked the file handle (open(...).read()); a
    # context manager closes it deterministically.
    with open('tpls/python_logo.png', 'rb') as img:
        data = img.read().encode('base64')
    obj = {'logo': data}
    # The template object itself must be in the context ('tpl') so the
    # custom jinja environment can build inline content against it.
    ctx = {'obj': obj, 'tpl': tpl}
    jinja_env = get_env()
    tpl.render(ctx, jinja_env)

    tpl.save('tpls/test.docx')
Пример #8
0
def document_creator(message):
    """Consume a queue message carrying a PollResult pk, then render a
    docx for each template attached to the poll.

    Logs and returns silently when the pk is malformed or the PollResult
    row no longer exists.
    """
    try:
        pk = int(message.get('pk').decode())
    except ValueError as e:
        logger.error(e)
        return
    try:
        poll_result = PollResult.objects.get(pk=pk)
    except PollResult.DoesNotExist as e:
        logger.error(e)
        return

    # NOTE(review): `templates.objects.all()` looks wrong — a Django
    # related manager has no `.objects`; presumably `templates.all()`.
    for template in poll_result.poll.templates.objects.all():
        doc = DocxTemplate(template.file)
        doc.render(poll_result.poll_result)
        # NOTE(review): DocxTemplate.save() requires a filename/stream
        # argument; calling it with none will raise — confirm intent.
        doc.save()
Пример #9
0
    def create_source_docx(self, cr, uid, ids, report, context=None):
        """Render the report's docx template over the record data and
        return ``(binary_stream, report_type)``.

        The intermediate docx is written to the current working directory
        and always removed, even if reading it back fails.
        """
        data = DataModelProxy(self.get_docx_data(cr, uid, ids, report, context))

        foldname = os.getcwd()
        temp_out_file = os.path.join(foldname, 'temp_out_%s.docx' % os.getpid())

        report_stream = ''
        doc = DocxTemplate(misc.file_open(report.template_file).name)
        doc.render({'obj': data})
        doc.save(temp_out_file)

        # FIX: guarantee temp-file cleanup with finally (the original
        # leaked the file when the read raised); binary 'rb' read kept.
        try:
            with open(temp_out_file, 'rb') as input_stream:
                report_stream = input_stream.read()
        finally:
            os.remove(temp_out_file)

        return (report_stream, report.report_type)
Пример #10
0
    def memo_docx(self, request, pk=None):
        """Build the rotation-request memo as a .docx attachment.

        Looks up the rotation request, rejects departments that do not
        require a memo, picks the inside/outside template depending on
        whether the hospital is KAMC, renders it, and streams the result
        back.  (Python 2 code: uses StringIO for the in-memory buffer.)
        """
        rotation_request = get_object_or_404(RotationRequest, pk=pk)
        department = rotation_request.requested_department.get_department()
        intern = rotation_request.internship.intern

        # Check if memo is expected
        department_requires_memo = department.requires_memo
        if not department_requires_memo:
            raise ForwardNotExpected("This rotation request does not require a forward.")

        # Template choice depends on whether the hospital is KAMC.
        template_name = "inside_request" if department.hospital.is_kamc else "outside_request"
        template = DocumentTemplate.objects.get(codename=template_name)

        docx = DocxTemplate(template.template_file)
        context = {
            'now': timezone.now(),
            'contact_name': department.contact_name,
            'contact_position': department.contact_position,
            'hospital': department.hospital.name,
            'intern_name': intern.profile.get_en_full_name(),
            'specialty': rotation_request.specialty.name,
            'month': rotation_request.month.first_day().strftime("%B"),
            'year': rotation_request.month.year,
            'badge_number': intern.badge_number,
            'mobile_number': intern.mobile_number,
            'email': intern.profile.user.email,
        }
        docx.render(context)
        # Save into an in-memory buffer instead of a temp file.
        docx_file = StringIO.StringIO()
        docx.save(docx_file)
        docx_file.flush()
        docx_file.seek(0)

        file_name = "Memo - %s - %s %s" % (
            intern.profile.get_en_full_name(),
            rotation_request.month.first_day().strftime("%B"),
            rotation_request.month.year,
        )

        response = HttpResponse(
            FileWrapper(docx_file),
            content_type="application/vnd.openxmlformats-officedocument.wordprocessingml.document"
        )
        # NOTE(review): file_name is not quoted/escaped; names containing
        # header-special characters could break the header — confirm.
        response['Content-Disposition'] = 'attachment; filename=%s.docx' % file_name
        return response
Пример #11
0
def run(dico, template, name):
    """Render *template* with the content of *dico* and save it as *name*.

    Strings nested inside the list entries of each 'Fiches' item are
    wrapped in RichText before rendering; top-level plain string values
    are deliberately left untouched.  (Python 2 code.)
    """
    document = DocxTemplate(template)

    for fiche in dico['Fiches']:
        for cle, valeur in fiche.iteritems():
            if isinstance(valeur, basestring):
                # Plain strings stay as-is.
                pass
            elif isinstance(valeur, list):
                for entree in valeur:
                    for sous_cle, sous_valeur in entree.iteritems():
                        entree[sous_cle] = RichText(sous_valeur)

    document.render(dico)
    document.save(name)
Пример #12
0
    def create_source_docx(self, cr, uid, ids, report, context=None):
        """Render the docx template for the given records and return
        ``(binary_stream, report_type)``.

        The temp output file is always deleted via ``finally``.
        """
        data = self.get_docx_data(cr, uid, ids, report, context)

        foldname = os.getcwd()
        temp_out_file = os.path.join(foldname, 'temp_out_%s.docx' % os.getpid())

        report_stream = ''
        try:
            doc = DocxTemplate(misc.file_open(report.template_file).name)
            doc.render({'obj': data})
            doc.save(temp_out_file)

            # FIX: read the docx back in binary mode ('rb'); text mode
            # can corrupt the zip payload and breaks on Python 3.
            with open(temp_out_file, 'rb') as input_stream:
                report_stream = input_stream.read()
        except Exception:
            # FIX: re-raise the original exception (bare ``raise``);
            # ``raise Exception`` discarded the real error and traceback.
            raise
        finally:
            os.remove(temp_out_file)

        return (report_stream, report.report_type)
Пример #13
0
    def create_source_docx(self, cr, uid, ids, report, context=None):
        """Render the report template to docx (converting to pdf when the
        report asks for it) and return ``(binary_stream, output_type)``."""
        proxy = DataModelProxy(self.get_docx_data(cr, uid, ids, report, context))
        workdir = tempfile.mkdtemp()
        docx_path = self.generate_temp_file(workdir)

        template = DocxTemplate(misc.file_open(report.template_file).name)
        # Image support (2016-11-2): the helper's jinja environment needs
        # the template itself exposed under 'tpl' in the context.
        from . import report_helper
        template.render({'obj': proxy, 'tpl': template}, report_helper.get_env())
        template.save(docx_path)

        out_path = self.render_to_pdf(docx_path) if report.output_type == 'pdf' else docx_path

        with open(out_path, 'rb') as stream:
            payload = stream.read()
        os.remove(out_path)
        return payload, report.output_type
Пример #14
0
    def convert(self, jira_json):
        """Build a docx RFC document from a Jira issue JSON payload.

        Returns a ``(filename, buffer)`` pair; the buffer is rewound and
        ready for download.  (Python 2 code: StringIO buffer.)
        """
        document = DocxTemplate(self.templatefile)

        fields = jira_json["fields"]
        creator_name = fields["creator"]["displayName"]
        context = {
            "ticketnumber": jira_json["key"],
            "priority": fields["priority"]["name"],
            "pref_resolution_date": RFCConverter.cleandate(fields["duedate"]),
            "createdate": RFCConverter.cleandate(fields["created"]),
            "submitter": creator_name,
            "components": ", ".join(c["name"] for c in fields["components"]),
            "submittermail": fields["creator"]["emailAddress"],
            "description": R(fields["description"]),
            "accepted_by": creator_name,
            "print_date": RFCConverter.cleandate(),
            "verified_by": creator_name,
            "change_number": jira_json["key"],
        }
        document.render(context)

        buf = StringIO()
        document.save(buf)
        buf.seek(0)

        return "RFC_" + jira_json["key"] + ".docx", buf
Пример #15
0
            float(time_series[keys[len(time_series)-1]]["1. open"]), 2)

        for timestamp, data in time_series.items():

            if float(data["2. high"]) > monthly_high:
                monthly_high = float(data["2. high"])

            if float(data["3. low"]) < monthly_low:
                monthly_low = float(data["3. low"])

            monthly_volume += int(data["5. volume"])

            monthly_avg_price += float(data["4. close"])

        monthly_avg_price /= len(time_series)
        monthly_avg_price = round(monthly_avg_price, 2)
        monthly_change = round(monthly_close - monthly_open, 2)
        

    context = {'ticker': ticker.upper(), 'date': date, 'time': time, 'quote_open': quote_open, 'quote_price1': quote_price, 'quote_high': quote_high, 'quote_low': quote_low,
               'quote_volume': quote_volume, 'quote_last_day': quote_last_day, 'quote_last_close': quote_last_close, 'quote_change': quote_change,
               'quote_change_percent': quote_change_percent,  'intraday_graph': intraday_graph, 'intraday_open': intraday_open,
               'intraday_close': intraday_close, 'intraday_high': intraday_high, 'intraday_low': intraday_low, 'intraday_change': intraday_change,
               'intraday_avg_price': intraday_avg_price, 'intraday_volume': intraday_volume, 'monthly_graph': monthly_graph, 'monthly_open': monthly_open,
               'monthly_close': monthly_close, 'monthly_high': monthly_high, 'monthly_low': monthly_low, 'monthly_change': monthly_change,
               'monthly_avg_price': monthly_avg_price, 'monthly_volume': monthly_volume}

    doc.render(context)
    doc.save(out_file_docx)

Пример #16
0
 def render(self, template, context, file_):
     """Render *template* (a file name under the templates directory)
     with *context* and write the result to *file_*."""
     document = DocxTemplate(os.path.join(script_dir, 'templates', template))
     prepared = self.format_context(context, document)
     document.render(prepared, self.jinja_env)
     document.save(file_)
Пример #17
0
class ReportMaker:
    """Builds .docx reports (chat transcripts, image grids, video
    thumbnails) from ORM query results, using docxtpl templates plus the
    custom jinja2 filters declared in the ``filters`` module."""

    def __init__(self):
        self.tempdir = Path(tempfile.gettempdir())
        self.template_dir = Path(settings.app_dir / "report_docx/templates")
        self.jinja_env = jinja2.Environment()
        # Expose every name defined in the filters module as a jinja2 filter.
        for name, function in filters.__dict__.items():
            self.jinja_env.filters[name] = function

    def make_chat(self, query, path=None, chat=None):
        """Render the chat template over all messages in *query*.
        Returns the output path (a temp file unless *path* is given)."""
        if not path:
            path = self.tempdir / "generated_doc.docx"
        self.tpl = DocxTemplate(self.template_dir / "chat.docx")
        messages = query.all()
        # Precompute each message's displayable payload (text or inline image).
        for i, message in enumerate(messages):
            messages[i].data = self._get_data(message)
        context = {'messages': messages, 'chat': chat}
        self.tpl.render(context, self.jinja_env)
        self.tpl.save(path)
        return path

    def make_images(self, query, n_cols=3):
        """Lay the query's files out as pictures in an *n_cols*-wide
        bordered table and return the generated document's path."""
        width = Mm(int(150 / n_cols))
        files = query.all()
        n = len(files)
        n_rows = int(n / n_cols)
        if n % n_cols > 0:
            n_rows += 1
        rows = [[f for f in files[i * n_cols: i * n_cols + n_cols]] for i in range(n_rows)]
        doc = Document()
        table = doc.add_table(rows=len(rows), cols=n_cols)
        table.style = 'Table Grid'
        path = self.tempdir / "generated_doc.docx"
        for i, row in enumerate(rows):
            for j, file_ in enumerate(row):
                r = table.rows[i].cells[j].paragraphs[0].add_run()
                try:
                    # Videos contribute their thumbnail instead of the raw file.
                    p = file_.path if file_.type_ == 'image' else file_.thumb_path
                    r.add_picture(p, width=width)
                except Exception:
                    # FIX: narrowed the bare ``except:`` (which also caught
                    # KeyboardInterrupt/SystemExit).  When the picture
                    # cannot be embedded, fall back to showing the path.
                    table.rows[i].cells[j].text = file_.path

        doc.save(path)
        return path

    def make_video_thumbs(self, query, n_cols=3):
        """Render the images template with the analysis thumbnails of the
        query's videos, *n_cols* per row; return the document's path."""
        width = int(150 / n_cols)
        self.tpl = DocxTemplate(self.template_dir / "images.docx")
        files = query.filter(File.type_ == 'video', File.analise_thumb != None).all()
        n = len(files)
        n_rows = int(n / n_cols)
        if n % n_cols > 0:
            n_rows += 1
        rows = [[self._get_inline_image_thumb(f, width=width) for f in files[i * n_cols: i * n_cols + n_cols]] for i in range(n_rows)]
        context = {'rows': rows}
        self.tpl.render(context, self.jinja_env)
        path = self.tempdir / "generated_doc.docx"
        self.tpl.save(path)
        return path

    def _get_inline_image(self, file, width=30):
        """InlineImage for ``file.path`` (*width* in mm), or a placeholder
        string when the file does not exist."""
        path = settings.work_dir / file.path
        if path.exists():
            img = InlineImage(self.tpl, str(path), width=Mm(width))
            if img:
                return img
        return "-- Imagem indisponível --"

    def _get_inline_image_thumb(self, file, width=30):
        """Like ``_get_inline_image`` but reads ``file.thumb_path``."""
        path = settings.work_dir / file.thumb_path
        if path.exists():
            img = InlineImage(self.tpl, str(path), width=Mm(width))
            if img:
                return img
        return "-- Imagem indisponível --"

    def _get_data(self, message):
        """Return what to render for a message: its text body, an inline
        image/thumbnail for its first attachment, or a textual marker."""
        if message.body:
            return message.body
        if message.attachments.count() > 0:
            attachment = message.attachments[0]
            if attachment.type_ == 'audio':
                return '--Mensagem de áudio--'
            if attachment.type_ == 'image':
                return InlineImage(self.tpl, str(settings.work_dir / attachment.path), width=Mm(30))
            if attachment.type_ == 'video':
                if attachment.has_thumb:
                    return InlineImage(self.tpl, str(settings.work_dir / attachment.thumb_path), width=Mm(40))
                else:
                    return "--Video (miniatura não disponível)--"
            return "--Arquivo--"
        return ""
Пример #18
0
    def create_source_docx_partner(self,
                                   ids,
                                   report,
                                   records,
                                   init_pay,
                                   context=None):
        """Render a partner statement docx and return
        ``(binary_stream, output_type)``.

        When *records* is empty, a skeleton report (optionally carrying
        *init_pay* as both opening and closing balance) is rendered and
        returned immediately; otherwise the statement lines plus the
        opening/closing balances are computed from *records*.
        """
        # 2016-11-2: image support.
        # 1. Import the dependency (python3 syntax):
        # from . import report_helper
        # 2. The context needs a "tpl" key exposing the template object.
        tempname = tempfile.mkdtemp()
        temp_out_file = self.generate_temp_file(tempname)
        doc = DocxTemplate(misc.file_open(report.template_file).name)

        env = api.Environment(context)
        partner = env.get('partner').search([('id', '=',
                                              context.get('partner_id'))])
        simple_dict = {
            'partner_name': partner.name,
            'from_date': context.get('from_date'),
            'to_date': context.get('to_date'),
            'report_line': [],
            'init_pay': {},
            'final_pay': {}
        }
        if not records:
            # No statement lines: render the skeleton report and return early.
            if init_pay:
                simple_dict['init_pay'] = init_pay
                simple_dict['final_pay'] = init_pay
            doc.render({
                'obj': simple_dict,
                'tpl': doc
            }, report_helper.get_env())
            doc.save(temp_out_file)

            report_stream = ''
            with open(temp_out_file, 'rb') as input_stream:
                report_stream = input_stream.read()

            os.remove(temp_out_file)
            return report_stream, report.output_type

        data = DataModelProxy(records)
        for p_value in data:
            simple_dict['report_line'].append({
                'date':
                p_value.date,
                'name':
                p_value.name,
                'note':
                p_value.note,
                'amount':
                p_value.amount,
                'pay_amount':
                p_value.pay_amount,
                'discount_money':
                p_value.discount_money,
                'balance_amount':
                p_value.balance_amount
            })
        if data:
            # Opening balance: first line's running balance with that
            # line's own movement backed out; closing balance: last
            # line's running balance.
            simple_dict['init_pay'] = data[0].balance_amount - data[
                0].amount + data[0].pay_amount - data[0].discount_money
            simple_dict['final_pay'] = data[-1].balance_amount

        doc.render({'obj': simple_dict, 'tpl': doc}, report_helper.get_env())
        doc.save(temp_out_file)

        # Optionally convert the rendered docx to pdf.
        if report.output_type == 'pdf':
            temp_file = self.render_to_pdf(temp_out_file)
        else:
            temp_file = temp_out_file

        report_stream = ''
        with open(temp_file, 'rb') as input_stream:
            report_stream = input_stream.read()

        os.remove(temp_file)
        return report_stream, report.output_type
Пример #19
0
def gerate_report(data: dict):
    """Render the standard template with *data* as the jinja2 context and
    save the document to the module-level ``result_path``."""
    doc = DocxTemplate("media/template/template.docx")
    doc.render(data)
    doc.save(result_path)
Пример #20
0
def fill_jinja(input_file, output_file, context):
    """Render the docx template *input_file* with *context* and write the
    result to *output_file*."""
    template = DocxTemplate(input_file)
    template.render(context)
    template.save(output_file)
Пример #21
0
def CreateWord(gender, group, surname, name, lastname, number, typeconcession,
               chooseDoc):
    """Render a student application .docx from one of two templates and
    return it wrapped in an HttpResponse attachment.

    Returns an ``"Error ..."`` string for any invalid input instead of a
    response object.
    """
    # Reject missing data.
    if gender is None or group is None or surname is None or name is None or lastname is None or number is None or typeconcession is None:
        return "Error NoData"
    # Reject empty fields.
    if gender == '' or group == '' or surname == '' or name == '' or lastname == '' or number == '' or typeconcession == '':
        return "Error NoData"
    # Limit every field to 128 characters.
    # FIX: the original compared len(group) six times (copy-paste bug),
    # so overlong names/numbers slipped through unchecked.
    if (len(group) > 128 or len(surname) > 128 or len(name) > 128 or
            len(lastname) > 128 or len(number) > 128 or len(typeconcession) > 128):
        return "Error Len"

    # Validate the gender flag and map it to its display text.
    if gender != "1" and gender != "0":
        return "Error Gender"
    gender = GenderMass[int(gender)]

    # Pick the template and fill in the parameters.
    random.seed()
    if chooseDoc == '1':
        doc = DocxTemplate("template1.docx")
    elif chooseDoc == '2':
        doc = DocxTemplate("template2.docx")
        typeconcession = 10  # template #2 always uses concession entry 10
    else:
        print("chooseDoc error")
        return "Error no chooseDoc"

    if int(typeconcession) < 0 or int(typeconcession) > 10:
        return "Error typeConcession"
    typeconcession = ConcessionMass[int(typeconcession)]

    director = chooseDirector(group)

    context = {
        'gender': gender,
        'group': group,
        'surname': surname,
        'name': name,
        'lastname': lastname,
        'number': number,
        'typeconcession': typeconcession,
        'director': director
    }

    doc.render(context)
    # Find a temp name that is not taken: opening an existing file
    # succeeds and we retry; IOError means the name is free.
    File_Path = ""
    while True:
        try:
            File_Path = "temp" + str(random.randint(1, 10000)) + ".docx"
            file = open(File_Path)
            file.close()
        except IOError:
            break

    # Build the response for the user.
    doc.save(File_Path)
    File_Path = os.path.abspath(File_Path)
    fp = open(File_Path, "rb")
    response = HttpResponse(fp.read())
    fp.close()
    # FIX: guess_type() returns a (type, encoding) tuple, so the original
    # set Content-Type to a tuple and its `is None` check never fired;
    # take the type element and fall back to octet-stream.
    file_type = mimetypes.guess_type(File_Path)[0]
    if file_type is None:
        file_type = 'application/octet-stream'
    response['Content-Type'] = file_type
    response['Content-Length'] = str(os.stat(File_Path).st_size)
    response[
        'Content-Disposition'] = "attachment; filename=Zaiavlenui_Na_matpomosh.docx"

    # Clean up the temp file.
    os.remove(File_Path)
    return response
Пример #22
0
    InlineImage(tpl, pic_dir + '消费金额区间段分布.png', width=Mm(width_pic)),
    '刷卡与二维码金额比例':
    InlineImage(tpl, pic_dir + '刷卡与二维码金额比例.png', width=Mm(width_pic / 2)),
    '刷卡与二维码笔数比例':
    InlineImage(tpl, pic_dir + '刷卡与二维码笔数比例.png', width=Mm(width_pic / 2)),
    '贷记卡5000元以上交易金额随时间变化曲线':
    InlineImage(tpl,
                pic_dir + '贷记卡5000元以上交易金额随时间变化曲线.png',
                width=Mm(width_pic)),
    '贷记卡5000元以上交易笔数随时间变化曲线':
    InlineImage(tpl,
                pic_dir + '贷记卡5000元以上交易笔数随时间变化曲线.png',
                width=Mm(width_pic)),
    '二维码交易金额区间笔数':
    InlineImage(tpl, pic_dir + '二维码交易金额区间笔数.png', width=Mm(width_pic)),
    '刷卡交易金额区间笔数':
    InlineImage(tpl, pic_dir + '刷卡交易金额区间笔数.png', width=Mm(width_pic)),
    '支付方式交易金额随时间变化曲线':
    InlineImage(tpl, pic_dir + '支付方式交易金额随时间变化曲线.png', width=Mm(width_pic)),
    '支付方式交易笔均金额随时间变化曲线':
    InlineImage(tpl, pic_dir + '支付方式交易笔均金额随时间变化曲线.png', width=Mm(width_pic)),
}

table.update(context)
table.update(image)
jinja_env = jinja2.Environment(autoescape=True)
tpl.render(table, jinja_env)
tpl.save(document_dir)

# %%
Пример #23
0
class doc(object):
    """Security-notice .docx builder.

    Wraps a docxtpl template ('demo.docx') and four subdocuments — one
    per vulnerability category — that are filled with headings and
    key/value tables, then merged into the final report on save.
    """

    def __init__(self):
        base_url = getDir() + '/user/user_template/'            # build a doc object from the user template dir
        asset_url = base_url + 'demo.docx'
        self.tpl = DocxTemplate(asset_url)
        self.webvul = self.tpl.new_subdoc()                # web application vulnerabilities
        self.appvul = self.tpl.new_subdoc()                # application program vulnerabilities
        self.devicevul = self.tpl.new_subdoc()             # network device vulnerabilities
        self.sysvul = self.tpl.new_subdoc()                # operating system vulnerabilities
        self.time = self.tpl.new_subdoc()
        self.number = 0                                    # count of entries added so far

    def add_title(self, title, type , level=3):
        """Append *title* as a 14pt SimSun heading to the category subdoc."""
        doc = self.check_type(type)
        run = doc.add_heading('', level=level).add_run(title)
        run.font.name = "宋体"
        run.font.size = Pt(14)
        r = run.element
        # east-Asian font must be set on the rFonts element explicitly
        r.rPr.rFonts.set(qn('w:eastAsia'), u'宋体')

    def add_table(self,jsons, type=1):
        '''
        Add one titled key/value table per entry to the proper subdoc.
        :param jsons: list of dicts; each must contain a 'title' key and
                its remaining items become table rows.
        :param type: 1 = web vuln, 2 = application vuln,
                3 = network device vuln, 4 = system vuln
        :return:
        '''

        # Pick the subdoc node to write into.
        doc = self.check_type(type)

        for json in jsons:
            # j tracks the current dict item index (index 0 is 'title').
            #print(json)
            j = 0
            title = json['title']                                 # build the heading
            self.number += 1
            self.add_title(title,type=type)
            table = doc.add_table(rows=len(json)-1, cols=2, style="Style1")# create a new table
            table.autofit = False
            for key, value in json.items():
                if j == 0:
                    # Skip the first item ('title'); it is already a heading.
                    # NOTE(review): this assumes 'title' iterates first,
                    # which relies on dict insertion order — confirm.
                    j +=1
                else:
                    table.columns[0].width = Cm(3)
                    table.columns[1].width = Cm(12)
                    table.cell(j - 1, 0).width = Cm(3)   # set cell widths
                    table.cell(j - 1, 1).width = Cm(12)
                    #table.alignment=WD_TABLE_ALIGNMENT.RIGHT  # set table alignment

                    keyCell = table.cell(j-1, 0)            # assign the two cells
                    valueCell = table.cell(j-1, 1)

                    # Key cell: Microsoft YaHei 10.5pt, 1.5 line spacing,
                    # right-aligned, vertically centered.
                    key_paragraph = keyCell.paragraphs[0]
                    #keyRun = keyCell.paragraphs[0].add_run(key)
                    keyRun = key_paragraph.add_run(key)
                    keyRun.font.name = u'微软雅黑'  # font family
                    keyRun._element.rPr.rFonts.set(qn('w:eastAsia'), u'微软雅黑')
                    keyRun.font.size = Pt(10.5)  # Chinese "five" size (10.5pt)
                    key_paragraph.paragraph_format.line_spacing = WD_LINE_SPACING.ONE_POINT_FIVE# 1.5 line spacing
                    key_paragraph.paragraph_format.alignment = WD_PARAGRAPH_ALIGNMENT.RIGHT # right-align horizontally
                    keyCell.vertical_alignment = WD_CELL_VERTICAL_ALIGNMENT.CENTER

                    # Value cell: same font, left-aligned.
                    val_paragraph = valueCell.paragraphs[0]

                    #valueRun = valueCell.paragraphs[0].add_run(value)  # content to insert

                    valueRun = val_paragraph.add_run(value)
                    valueRun.font.name = u'微软雅黑'  # font family
                    valueRun.font.size = Pt(10.5)  # Chinese "five" size (10.5pt)
                    valueRun._element.rPr.rFonts.set(qn('w:eastAsia'), u'微软雅黑')
                    val_paragraph.paragraph_format.line_spacing_rule = WD_LINE_SPACING.ONE_POINT_FIVE# 1.5 line spacing
                    val_paragraph.paragraph_format.alignment = WD_PARAGRAPH_ALIGNMENT.LEFT# left-align horizontally
                    valueCell.vertical_alignment = WD_CELL_VERTICAL_ALIGNMENT.CENTER
                    #keyCell.text = key
                    #valueCell.text = value
                    #print(key,value)
                    j = j+1
        # Write the populated subdoc back onto the matching attribute.
        if type==1:
            self.webvul=doc
        elif type==2:
            self.appvul= doc
        elif type==3:
            self.devicevul = doc
        else:
            self.sysvul = doc

    # Save the docx document.
    def save_doc(self,current_time, start_time):
        """Render the template with all four subdocs plus the reporting
        period, then save under a period-stamped file name."""
        # The file name embeds the reporting period.
        filename = "上海驭胜信息安全通告(%s至%s).docx" % (str(start_time),str(current_time))
        content = {'subdoc': self.webvul,
                        'appdoc':self.appvul,
                        'devicedoc': self.devicevul,
                        'sysdoc': self.sysvul,
                        'time': current_time ,
                        'start_time': start_time,
                        'end_time': current_time}
        self.tpl.render(content)
        self.tpl.save(filename)
        print("file %s success to save!" % filename)

    def check_type(self,num):
        """Map a category number to its subdocument (default: system)."""
        if num==1:
            return self.webvul
        elif num ==2:
            return self.appvul
        elif num ==3:
            return self.devicevul
        else:
            return self.sysvul

    def add_hyperlink(self,paragraph, text, url):
        """Append a styled, clickable hyperlink run to *paragraph*."""
        # This gets access to the document.xml.rels file and gets a new relation id value
        part = paragraph.part
        r_id = part.relate_to(url, docx.opc.constants.RELATIONSHIP_TYPE.HYPERLINK, is_external=True)

        # Create the w:hyperlink tag and add needed values
        hyperlink = docx.oxml.shared.OxmlElement('w:hyperlink')
        hyperlink.set(docx.oxml.shared.qn('r:id'), r_id, )

        # Create a w:r element and a new w:rPr element
        new_run = docx.oxml.shared.OxmlElement('w:r')
        rPr = docx.oxml.shared.OxmlElement('w:rPr')

        # Join all the xml elements together add add the required text to the w:r element
        new_run.append(rPr)
        new_run.text = text
        hyperlink.append(new_run)

        # Create a new Run object and add the hyperlink into it
        r = paragraph.add_run ()
        r.font.name = u'微软雅黑'  # font family
        r.font.size = Pt(10.5)  # Chinese "five" size (10.5pt)
        r._element.rPr.rFonts.set(qn('w:eastAsia'), u'微软雅黑')
        r._r.append (hyperlink)

        # A workaround for the lack of a hyperlink style (doesn't go purple after using the link)
        # Delete this if using a template that has the hyperlink style in it
        r.font.color.theme_color = MSO_THEME_COLOR_INDEX.HYPERLINK
        r.font.underline = True

        return hyperlink
Пример #24
0
def read_excel():
    """Generate one Word report per data row of a user-chosen Excel sheet.

    Prompts for an Excel file name (resolved against the current working
    directory), creates an output directory named after the workbook
    (extension stripped), then renders the ``person.docx`` docxtpl
    template once per row of the first sheet and saves each result into
    that directory.  Any exception is printed and written to
    ``log_err.txt`` in the working directory.
    """
    zpath = os.getcwd() + '/'
    try:
        filename = input('输入文件名: ')
        print(filename)

        my_file_name = zpath + filename

        # Guard clause: abort cleanly if the workbook is missing.
        # (The original called os._exit() without its mandatory status
        # argument, which raised TypeError instead of exiting.)
        if not Path(my_file_name).exists():
            print("文件不存在,重新输入文件名称!")
            return

        # Output directory: same name as the workbook, extension stripped.
        my_dir_name = zpath + filename.replace('.xls', '')
        if not Path(my_dir_name).exists():
            os.makedirs(my_dir_name)
            print("创建文件存储目录")

        # Open the workbook and its first sheet; row 0 is the header.
        x1 = xlrd.open_workbook(my_file_name)
        table = x1.sheet_by_index(0)
        nrows = table.nrows

        print('生成报告数:' + str(nrows - 1))

        for i in range(nrows - 1):
            number = table.cell_value(i + 1, 1)
            # Stop at the first row without a serial number.
            if number is None:
                break
            id = str(table.cell_value(i + 1, 2)).strip()
            name = str(table.cell_value(i + 1, 3)).strip()
            sex = str(table.cell_value(i + 1, 4)).strip()
            age = table.cell_value(i + 1, 5)

            tpl = DocxTemplate(zpath + 'person.docx')
            context = {
                'number': number,
                'id': id,
                'name': name,
                'sex': sex,
            }

            # Age may be absent, a float (xlrd numeric cell) or a string.
            if age is None:
                context['age'] = ''
            elif isinstance(age, float):
                context['age'] = int(age)
            else:
                context['age'] = age

            # NOTE: the '_' + "_" double separator is kept so generated
            # file names stay backward compatible.
            temp = str(i + 1)
            saveFileName = my_dir_name + '/' + \
                           name.strip() + '_' + \
                           "_" + temp + '.docx'
            print(saveFileName)
            tpl.render(context)
            tpl.save(saveFileName)

    except Exception as err:
        print("err %s: " % err)
        blogpath = os.path.join(zpath, 'log_err.txt')
        # Context manager guarantees the log handle is closed (the
        # original used an explicit open/close pair).
        with open(blogpath, 'w+') as f:
            f.writelines(repr(err))
Пример #25
0
def odp2(request):
    """Django view: side-by-side comparison report for two companies.

    Resolves both user-supplied company names to ticker symbols, downloads
    their daily quotes from stooq.pl as CSV, persists any missing rows into
    the ``Dane_spółek`` model, computes yearly average daily spread
    (high - low) statistics for 2018-2020 and a ten-year series, renders a
    Word comparison report via docxtpl, and finally renders the comparison
    HTML page.  Only comments were added in this revision; several
    suspected defects are flagged inline with NOTE(review).
    """
    name_spolka_do_analizy_ver1 = request.GET["spółka_do_analizy_ver1"]

    name_spolka_do_analizy_ver2 = request.GET["spółka_do_analizy_ver2"]

    # szukanie() resolves a display name to a ticker; presumably returns
    # None when not found, hence the comparison against the string 'None'.
    name2 = str(szukanie(name_spolka_do_analizy_ver2))
    name1 = str(szukanie(name_spolka_do_analizy_ver1))

    # NOTE(review): the second if/else below unconditionally overwrites
    # flaga, so only name2's validity matters — an unresolved name1 is
    # silently ignored here and will fail later (Nazwy_spółek.objects.get).
    if (name1 != 'None'):
        flaga = True
    else:
        flaga = False
    if (name2 != 'None'):
        flaga = True
    else:
        flaga = False

    if flaga == True:

        # Stooq CSV export endpoints for daily quotes of both tickers.
        www_ver1 = 'https://stooq.pl/q/d/l/?s=' + name1 + '&i=d'
        www_ver2 = 'https://stooq.pl/q/d/l/?s=' + name2 + '&i=d'

        # download() is a project helper fetching the CSV to a local path.
        odp1 = download(www_ver1, name1)
        odp2 = download(www_ver2, name2)

        dane1 = pd.read_csv(odp1, sep=',', na_values=" ").dropna()
        dane2 = pd.read_csv(odp2, sep=',', na_values=" ").dropna()

        a = dane1
        b = dane2

        # Convert each DataFrame row to a plain dict for the ORM loop below.
        all_Data1 = []
        all_Data2 = []

        for i in range(a.shape[0]):
            temp = a.iloc[i]
            all_Data1.append(dict(temp))

        for i in range(b.shape[0]):
            temp = b.iloc[i]
            all_Data2.append(dict(temp))

    baza = Nazwy_spółek.objects.all()
    top = Popular.objects.all()
    # Fallback context used when the lookup failed (rendered at the end).
    context = {
        'flaga': flaga,
        'data': baza,
        'name1': name1,
        'name2': name2,
        'top': top,
    }
    if (flaga == True):
        # Drop the last (possibly incomplete) trading day from both sets.
        del all_Data1[-1]
        del all_Data2[-1]

        ver1 = Nazwy_spółek.objects.get(spolka_data_skrot=name1)
        ver2 = Nazwy_spółek.objects.get(spolka_data_skrot=name2)

        # Insert any quote rows not yet stored for company 1.
        # NOTE(review): the "b'Data" key suggests a mangled bytes header in
        # the downloaded CSV — verify against the download() helper.
        for data in all_Data1:
            if Dane_spółek.objects.filter(
                    spolka_name=ver1,
                    spolka_data=data["b'Data"]).exists() == False:
                Dane_spółek.objects.create(
                    spolka_name=ver1,
                    spolka_otwarcie=data['Otwarcie'],
                    spolka_najwyzszy=data['Najwyzszy'],
                    spolka_najnizszy=data['Najnizszy'],
                    spolka_zamkniecie=data['Zamkniecie'],
                    spolka_data=data["b'Data"])
        # Same insert pass for company 2.
        for data in all_Data2:
            if Dane_spółek.objects.filter(
                    spolka_name=ver2,
                    spolka_data=data["b'Data"]).exists() == False:
                Dane_spółek.objects.create(
                    spolka_name=ver2,
                    spolka_otwarcie=data['Otwarcie'],
                    spolka_najwyzszy=data['Najwyzszy'],
                    spolka_najnizszy=data['Najnizszy'],
                    spolka_zamkniecie=data['Zamkniecie'],
                    spolka_data=data["b'Data"])

        dane_1 = Dane_spółek.objects.all().filter(spolka_name=ver1)
        dane_2 = Dane_spółek.objects.all().filter(spolka_name=ver2)

        # Mean and standard deviation of opening prices (x axis) and of the
        # time index (y axis) via project statistics helpers.  Note these
        # use the fresh CSV frames dane1/dane2, not the ORM sets dane_1/_2.
        Mx1 = srednia(dane1['Otwarcie'])
        My1 = timesrednia(len(dane1))

        Mx2 = srednia(dane2['Otwarcie'])
        My2 = timesrednia(len(dane2))

        Sx1 = odchylenie(dane1['Otwarcie'], Mx1)
        Sy1 = odchylenieczas(len(dane1), My1)
        Sx2 = odchylenie(dane2['Otwarcie'], Mx2)
        Sy2 = odchylenieczas(len(dane2), My2)

        today = date.today()

        # Previous calendar year's rows, passed to the template as the
        # "current" data sets.
        dane_akt_1 = Dane_spółek.objects.all().filter(
            spolka_name=ver1, spolka_data__year=today.year - 1)
        dane_akt_2 = Dane_spółek.objects.all().filter(
            spolka_name=ver2, spolka_data__year=today.year - 1)

        # Per-year data-availability flags for company 1 (2018-2020).
        flaga20 = False
        flaga19 = False
        flaga18 = False

        # NOTE(review): dane_2020/dane_2019/dane_2018 (and the *_2
        # variants for company 2 below) remain UNBOUND when the matching
        # flag stays False, yet all six are referenced unconditionally in
        # the dane2 dict at the end of this branch — that raises NameError
        # for any company missing data in one of these years.
        if Dane_spółek.objects.filter(
                spolka_name=ver1, spolka_data__year='2020').exists() == True:
            dane_2020 = Dane_spółek.objects.all().filter(
                spolka_name=ver1, spolka_data__year='2020')
            flaga20 = True
        if Dane_spółek.objects.filter(
                spolka_name=ver1, spolka_data__year='2019').exists() == True:
            dane_2019 = Dane_spółek.objects.all().filter(
                spolka_name=ver1, spolka_data__year='2019')
            flaga19 = True
        if Dane_spółek.objects.filter(
                spolka_name=ver1, spolka_data__year='2018').exists() == True:
            dane_2018 = Dane_spółek.objects.all().filter(
                spolka_name=ver1, spolka_data__year='2018')
            flaga18 = True

        # Per-year data-availability flags for company 2.
        flaga20s = False
        flaga19s = False
        flaga18s = False

        if Dane_spółek.objects.filter(
                spolka_name=ver2, spolka_data__year='2020').exists() == True:
            dane_2020_2 = Dane_spółek.objects.all().filter(
                spolka_name=ver2, spolka_data__year='2020')
            flaga20s = True
        if Dane_spółek.objects.filter(
                spolka_name=ver2, spolka_data__year='2019').exists() == True:
            dane_2019_2 = Dane_spółek.objects.all().filter(
                spolka_name=ver2, spolka_data__year='2019')
            flaga19s = True
        if Dane_spółek.objects.filter(
                spolka_name=ver2, spolka_data__year='2018').exists() == True:
            dane_2018_2 = Dane_spółek.objects.all().filter(
                spolka_name=ver2, spolka_data__year='2018')
            flaga18s = True

        # Average daily spread (high - low) per trading day for company 1,
        # one value per year; 0 when the year has no data.
        if (flaga20 == True):
            sum2020 = dane_2020.count()
            op2020 = dane_2020.aggregate(Sum('spolka_najwyzszy'))
            op20202 = dane_2020.aggregate(Sum('spolka_najnizszy'))
            ost = (op2020['spolka_najwyzszy__sum'] -
                   op20202['spolka_najnizszy__sum']) / sum2020
        else:
            ost = 0

        if (flaga19 == True):
            sum1 = dane_2019.count()
            op1 = dane_2019.aggregate(Sum('spolka_najwyzszy'))
            op2 = dane_2019.aggregate(Sum('spolka_najnizszy'))
            kk = (op1['spolka_najwyzszy__sum'] -
                  op2['spolka_najnizszy__sum']) / sum1

        else:
            kk = 0

        if (flaga18 == True):
            sum8 = dane_2018.count()
            op8 = dane_2018.aggregate(Sum('spolka_najwyzszy'))
            op81 = dane_2018.aggregate(Sum('spolka_najnizszy'))
            k8 = (op8['spolka_najwyzszy__sum'] -
                  op81['spolka_najnizszy__sum']) / sum8

        else:
            k8 = 0

        # Same per-year averages for company 2.
        if (flaga20s == True):
            sum20201 = dane_2020_2.count()
            op2020v = dane_2020_2.aggregate(Sum('spolka_najwyzszy'))
            op22020v = dane_2020_2.aggregate(Sum('spolka_najnizszy'))
            ost1 = (op2020v['spolka_najwyzszy__sum'] -
                    op22020v['spolka_najnizszy__sum']) / sum20201

        else:
            ost1 = 0

        if (flaga19s == True):
            sum12 = dane_2019_2.count()
            op12 = dane_2019_2.aggregate(Sum('spolka_najwyzszy'))
            op22 = dane_2019_2.aggregate(Sum('spolka_najnizszy'))
            ws = (op12['spolka_najwyzszy__sum'] -
                  op22['spolka_najnizszy__sum']) / sum12

        else:
            ws = 0

        if (flaga18s == True):
            sum82 = dane_2018_2.count()
            op82 = dane_2018_2.aggregate(Sum('spolka_najwyzszy'))
            op821 = dane_2018_2.aggregate(Sum('spolka_najnizszy'))
            w8 = (op82['spolka_najwyzszy__sum'] -
                  op821['spolka_najnizszy__sum']) / sum82

        else:
            w8 = 0

        # Ten-year series of the same spread statistic for company 1,
        # walking backwards from 2020; missing years contribute 0.
        # NOTE(review): both branches of the outer if below are identical,
        # so the (flaga20 and flaga19 and flaga18) condition has no
        # effect; the bare `str(k)` calls are also no-ops.
        k = 2020
        pusta_lista = []
        if (flaga20 and flaga19 and flaga18 == True):
            for i in range(10):
                str(k)
                if Dane_spółek.objects.filter(
                        spolka_name=ver1,
                        spolka_data__year=k).exists() == True:
                    dane_x = Dane_spółek.objects.all().filter(
                        spolka_name=ver1, spolka_data__year=k)
                    k = int(k - 1)
                    sumx = dane_x.count()
                    opx = dane_x.aggregate(Sum('spolka_najwyzszy'))
                    op2x = dane_x.aggregate(Sum('spolka_najnizszy'))
                    ost = (opx['spolka_najwyzszy__sum'] -
                           op2x['spolka_najnizszy__sum']) / sumx
                    pusta_lista.append(ost)
                else:
                    k = int(k - 1)
                    ost = 0
                    pusta_lista.append(ost)

        else:
            for i in range(10):

                str(k)
                if Dane_spółek.objects.filter(
                        spolka_name=ver1,
                        spolka_data__year=k).exists() == True:
                    dane_x = Dane_spółek.objects.all().filter(
                        spolka_name=ver1, spolka_data__year=k)

                    k = int(k - 1)
                    sumx = dane_x.count()
                    opx = dane_x.aggregate(Sum('spolka_najwyzszy'))
                    op2x = dane_x.aggregate(Sum('spolka_najnizszy'))
                    ost = (opx['spolka_najwyzszy__sum'] -
                           op2x['spolka_najnizszy__sum']) / sumx
                    pusta_lista.append(ost)
                else:
                    k = int(k - 1)
                    ost = 0
                    pusta_lista.append(ost)

        w = 2020

        # Same ten-year series for company 2; the two branches are again
        # identical (see NOTE(review) above).
        pusta_lista2 = []
        if (flaga20s and flaga19s and flaga18s == True):
            for i in range(10):
                str(w)
                if Dane_spółek.objects.filter(
                        spolka_name=ver2,
                        spolka_data__year=w).exists() == True:
                    dane_x = Dane_spółek.objects.all().filter(
                        spolka_name=ver2, spolka_data__year=w)
                    w = int(w - 1)
                    sumx = dane_x.count()
                    opx = dane_x.aggregate(Sum('spolka_najwyzszy'))
                    op2x = dane_x.aggregate(Sum('spolka_najnizszy'))

                    ost = (opx['spolka_najwyzszy__sum'] -
                           op2x['spolka_najnizszy__sum']) / sumx
                    pusta_lista2.append(ost)
                else:
                    w = int(w - 1)
                    ost = 0
                    pusta_lista2.append(ost)

        else:
            for i in range(10):

                str(w)
                if Dane_spółek.objects.filter(
                        spolka_name=ver2,
                        spolka_data__year=w).exists() == True:
                    dane_x = Dane_spółek.objects.all().filter(
                        spolka_name=ver2, spolka_data__year=w)

                    w = int(w - 1)
                    sumx = dane_x.count()
                    opx = dane_x.aggregate(Sum('spolka_najwyzszy'))
                    op2x = dane_x.aggregate(Sum('spolka_najnizszy'))
                    ost = (opx['spolka_najwyzszy__sum'] -
                           op2x['spolka_najnizszy__sum']) / sumx
                    pusta_lista2.append(ost)
                else:
                    w = int(w - 1)
                    ost = 0
                    pusta_lista2.append(ost)

        # Debug output of the yearly statistics.
        print(kk)
        print(ws)
        print(k8)
        print(w8)
        print(ost)
        print(ost1)
        # Year labels 2020..2011 matching the ten-year series above.
        listaroczna = []
        index = 2020
        for i in range(10):
            listaroczna.append(index)
            index = 2020 - (i + 1)

        # Template context for both the Word report and the HTML page.
        dane2 = {
            'lista': listaroczna,
            'dane_1': dane_1,
            'dane_2': dane_2,
            'nazwa_1': name1,
            'nazwa_2': name2,
            'Mx1': Mx1,
            'Mx2': Mx2,
            'My1': My1,
            'My2': My2,
            'Sx1': Sx1,
            'Sx2': Sx2,
            'Sy1': Sy1,
            'Sy2': Sy2,
            'dane_2020': dane_2020,
            'dane_2019': dane_2019,
            'dane_2018': dane_2018,
            'dane_2020_2': dane_2020_2,
            'dane_2019_2': dane_2019_2,
            'dane_2018_2': dane_2018_2,
            'zm': kk,
            'zm2': ws,
            'zm8': k8,
            'zm28': w8,
            'ww': ost,
            'ww2': ost1,
            'lis1': pusta_lista,
            'lis2': pusta_lista2,
            'dane_akt_1': dane_akt_1,
            'dane_akt_2': dane_akt_2,
        }
        # Render and save the Word comparison report as a side effect.
        doc = DocxTemplate("Docx/my_word_template.docx")
        doc.render(dane2)
        doc.save("Docx/generated_doc_" + name1 + "_vs_" + name2 +
                 "_report.docx")

        return render(request, "Strona_3_ver2.html", dane2)
    else:
        return render(request, "Strona_2.html", context=context)
def exportWord(jsonPath, productUuid):
    # 解析当前json中信息
    with open(jsonPath, 'r') as f:
        jsonData = json.load(f)
    cloud = jsonData['cloud']  # 云量信息
    totalArea = jsonData['totalArea']  # 蓝藻总面积

    # 生成文字所需信息===============================================================
    issue = os.path.basename(jsonPath).split('_')[3]
    year = int(issue[0:4])
    mm = int(issue[4:6])
    dd = int(issue[6:8])
    hour = int(issue[8:10])
    minute = int(issue[10:12])
    timeStr = '%d月%d日%d时%d分' % (mm, dd, hour, minute)
    totalPercent = jsonData['totalPercent']  # 蓝藻总百分比
    lakeStat = jsonData['lakeStat']  # 蓝藻面积分布区域
    algaeThreshold = jsonData['algaeThreshold']
    lakeRegionList = []
    for key in lakeStat.keys():
        if lakeStat[key] == 1:
            lakeRegionList.append(LAKE_REGION_NAME[key])
    if len(lakeRegionList) == 0:
        lakeRegionStr = ''
    elif len(lakeRegionList) == 1:
        lakeRegionStr = lakeRegionList[0]
    else:
        tempList = lakeRegionList[0:-1]
        lakeRegionStr = '、'.join(tempList) + '和' + lakeRegionList[-1]
    areaWX = jsonData['adminArea']['wuxi']
    areaCZ = jsonData['adminArea']['changzhou']
    areaSZ = jsonData['adminArea']['suzhou']
    percentWX = jsonData['adminPercent']['wuxi']
    percentCZ = jsonData['adminPercent']['changzhou']
    percentSZ = jsonData['adminPercent']['suzhou']
    areaH = jsonData['highArea']
    areaM = jsonData['midArea']
    areaL = jsonData['lowArea']
    percentH = jsonData['highPercent']
    percentM = jsonData['midPercent']
    percentL = jsonData['lowPercent']

    # 计算期号
    nowDatetime = datetime.datetime.strptime(issue[0:8] + '0000', '%Y%m%d%H%M')
    # 3月以前算上一年期号
    if mm < 3:
        startDatetime = datetime.datetime.strptime(str(year) + '01010000', '%Y%m%d%H%M')
    else:
        startDatetime = datetime.datetime.strptime(str(year) + '03010000', '%Y%m%d%H%M')
    num = (nowDatetime - startDatetime).days + 1    # 期号

    label2 = ''
    label3 = ''

    # 蓝藻日报文字部分===================================================
    # 1.全云
    if cloud >= 95:
        description = '%sEOS/MODIS卫星遥感影像显示(图1),太湖全部被云层覆盖,无法判断蓝藻聚集情况。' % timeStr
        description2 = '%sEOS/MODIS卫星遥感影像显示,太湖全部被云层覆盖,无法判断蓝藻聚集情况。' % timeStr
        label1 = '图1   %d年%d月%d日太湖区域卫星遥感影像' % (year, mm, dd)
        templateID = 1
        typeID = 1
    # 2.有云无藻
    elif 5 < cloud < 95 and totalArea == 0:
        description = '%sEOS/MODIS卫星遥感影像显示(图1),太湖部分湖区被云层覆盖,无云区域内未发现蓝藻聚集现象。' % timeStr
        description2 = '%sEOS/MODIS卫星遥感影像显示,太湖部分湖区被云层覆盖,无云区域内未发现蓝藻聚集现象。' % timeStr
        label1 = '图1   %d年%d月%d日太湖区域卫星遥感影像' % (year, mm, dd)
        templateID = 1
        typeID = 2
    # 3.无云无藻
    elif cloud <= 5 and totalArea == 0:
        description = '%sEOS/MODIS卫星遥感影像显示(图1),太湖未发现蓝藻聚集现象。' % timeStr
        description2 = '%sEOS/MODIS卫星遥感影像显示,太湖未发现蓝藻聚集现象。' % timeStr
        label1 = '图1   %d年%d月%d日太湖区域卫星遥感影像' % (year, mm, dd)
        templateID = 1
        typeID = 3
    # 4.有云有藻 面积不大于300
    elif 5 < cloud < 95 and 0 < totalArea <= 300:
        description = '%sEOS/MODIS卫星遥感影像显示(图1),太湖部分湖区被云层覆盖,无云区域内发现蓝藻聚集面积约%d平方千米(图2),' \
                      '占全湖总面积的%.1f%%,主要分布在%s。按行政边界划分,无锡水域%d平方千米,占%d%%;常州水域%d平方千米,' \
                      '占%d%%;苏州水域%d平方千米,占%d%%。' \
                      % (timeStr, totalArea, totalPercent, lakeRegionStr, areaWX, percentWX, areaCZ, percentCZ, areaSZ,
                         percentSZ)
        description2 = '%sEOS/MODIS卫星遥感影像显示,太湖部分湖区被云层覆盖,无云区域内发现蓝藻聚集面积约%d平方千米,' \
                       '占全湖总面积的%.1f%%,主要分布在%s。按行政边界划分,无锡水域%d平方千米,占%d%%;常州水域%d平方千米,' \
                       '占%d%%;苏州水域%d平方千米,占%d%%。' \
                       % (timeStr, totalArea, totalPercent, lakeRegionStr, areaWX, percentWX, areaCZ, percentCZ, areaSZ,
                          percentSZ)
        label1 = '图1   %d年%d月%d日太湖区域卫星遥感影像' % (year, mm, dd)
        label2 = '图2   %d年%d月%d日太湖蓝藻遥感监测' % (year, mm, dd)
        templateID = 2
        typeID = 4
    # 5.无云有藻 面积不大于300
    elif cloud <= 5 and 0 < totalArea <= 300:
        description = '%sEOS/MODIS卫星遥感影像显示(图1),太湖发现蓝藻聚集面积约%d平方千米(图2),占全湖总面积的%.1f%%,' \
                      '主要分布在%s。按行政边界划分,无锡水域%d平方千米,占%d%%;常州水域%d平方千米,占%d%%;苏州水域%d平方千米,' \
                      '占%d%%。'\
                      % (timeStr, totalArea, totalPercent, lakeRegionStr, areaWX, percentWX, areaCZ, percentCZ, areaSZ,
                         percentSZ)
        description2 = '%sEOS/MODIS卫星遥感影像显示,太湖发现蓝藻聚集面积约%d平方千米,占全湖总面积的%.1f%%,' \
                       '主要分布在%s。按行政边界划分,无锡水域%d平方千米,占%d%%;常州水域%d平方千米,占%d%%;苏州水域%d平方千米,' \
                       '占%d%%。'\
                       % (timeStr, totalArea, totalPercent, lakeRegionStr, areaWX, percentWX, areaCZ, percentCZ, areaSZ,
                          percentSZ)
        label1 = '图1   %d年%d月%d日太湖区域卫星遥感影像' % (year, mm, dd)
        label2 = '图2   %d年%d月%d日太湖蓝藻遥感监测' % (year, mm, dd)
        templateID = 2
        typeID = 5
    # 6.无云有藻 面积大于300 有高中低聚集区
    elif cloud <= 5 and totalArea > 300 and areaH > 0 and areaM > 0 and areaL > 0:
        description = '%sEOS/MODIS卫星遥感影像显示(图1),太湖发现蓝藻聚集面积约%d平方千米(图2),占全湖总面积的%.1f%%,' \
                      '主要分布在%s。其中,高、中、低聚集区面积分别约为%d平方千米、%d平方千米和%d平方千米,' \
                      '占蓝藻总聚集面积的%d%%、%d%%和%d%%(表1、图3)。按行政边界划分,无锡水域%d平方千米,占%d%%;' \
                      '常州水域%d平方千米,占%d%%;苏州水域%d平方千米,占%d%%。' \
                      % (timeStr, totalArea, totalPercent, lakeRegionStr, areaH, areaM, areaL, percentH, percentM,
                         percentL, areaWX, percentWX, areaCZ, percentCZ, areaSZ, percentSZ)
        description2 = '%sEOS/MODIS卫星遥感影像显示,太湖发现蓝藻聚集面积约%d平方千米,占全湖总面积的%.1f%%,' \
                       '主要分布在%s。其中,高、中、低聚集区面积分别约为%d平方千米、%d平方千米和%d平方千米,' \
                       '占蓝藻总聚集面积的%d%%、%d%%和%d%%。按行政边界划分,无锡水域%d平方千米,占%d%%;' \
                       '常州水域%d平方千米,占%d%%;苏州水域%d平方千米,占%d%%。' \
                       % (timeStr, totalArea, totalPercent, lakeRegionStr, areaH, areaM, areaL, percentH, percentM,
                          percentL, areaWX, percentWX, areaCZ, percentCZ, areaSZ, percentSZ)
        label1 = '图1   %d年%d月%d日太湖区域卫星遥感影像' % (year, mm, dd)
        label2 = '图2   %d年%d月%d日太湖蓝藻遥感监测' % (year, mm, dd)
        label3 = '图3   %d年%d月%d日太湖蓝藻聚集强度分级' % (year, mm, dd)
        templateID = 3
        typeID = 5
    # 7.无云有藻 面积大于300 无高聚集区 有中低聚集区
    elif cloud <= 5 and totalArea > 300 and areaH == 0 and areaM > 0 and areaL > 0:
        description = '%sEOS/MODIS卫星遥感影像显示(图1),太湖发现蓝藻聚集面积约%d平方千米(图2),占全湖总面积的%.1f%%,' \
                      '主要分布在%s。其中,无高聚集区,中、低聚集区面积分别约为%d平方千米和%d平方千米,占蓝藻总聚集面积的%d%%和%d%%' \
                      '(表1、图3)。按行政边界划分,无锡水域%d平方千米,占%d%%;常州水域%d平方千米,占%d%%;苏州水域%d平方千米,' \
                      '占%d%%。' \
                      % (timeStr, totalArea, totalPercent, lakeRegionStr, areaM, areaL, percentM, percentL, areaWX,
                         percentWX, areaCZ, percentCZ, areaSZ, percentSZ)
        description2 = '%sEOS/MODIS卫星遥感影像显示,太湖发现蓝藻聚集面积约%d平方千米,占全湖总面积的%.1f%%,' \
                       '主要分布在%s。其中,无高聚集区,中、低聚集区面积分别约为%d平方千米和%d平方千米,占蓝藻总聚集面积的%d%%和%d%%' \
                       '。按行政边界划分,无锡水域%d平方千米,占%d%%;常州水域%d平方千米,占%d%%;苏州水域%d平方千米,' \
                       '占%d%%。' \
                       % (timeStr, totalArea, totalPercent, lakeRegionStr, areaM, areaL, percentM, percentL, areaWX,
                          percentWX, areaCZ, percentCZ, areaSZ, percentSZ)
        label1 = '图1   %d年%d月%d日太湖区域卫星遥感影像' % (year, mm, dd)
        label2 = '图2   %d年%d月%d日太湖蓝藻遥感监测' % (year, mm, dd)
        label3 = '图3   %d年%d月%d日太湖蓝藻聚集强度分级' % (year, mm, dd)
        templateID = 3
        typeID = 5
    # 8.无云有藻 面积大于300 无高中聚集区 有低聚集区
    elif cloud <= 5 and totalArea > 300 and areaH == 0 and areaM == 0 and areaL > 0:
        description = '%sEOS/MODIS卫星遥感影像显示(图1),太湖发现蓝藻聚集面积约%d平方千米(图2),占全湖总面积的%.1f%%,' \
                      '主要分布在%s,全部为低聚集区(表1、图3)。按行政边界划分,无锡水域%d平方千米,占%d%%;常州水域%d平方千米,' \
                      '占%d%%;苏州水域%d平方千米,占%d%%。' \
                      % (timeStr, totalArea, totalPercent, lakeRegionStr, areaWX, percentWX, areaCZ, percentCZ, areaSZ,
                         percentSZ)
        description2 = '%sEOS/MODIS卫星遥感影像显示,太湖发现蓝藻聚集面积约%d平方千米,占全湖总面积的%.1f%%,' \
                       '主要分布在%s,全部为低聚集区。按行政边界划分,无锡水域%d平方千米,占%d%%;常州水域%d平方千米,' \
                       '占%d%%;苏州水域%d平方千米,占%d%%。' \
                       % (timeStr, totalArea, totalPercent, lakeRegionStr, areaWX, percentWX, areaCZ, percentCZ, areaSZ,
                          percentSZ)
        label1 = '图1   %d年%d月%d日太湖区域卫星遥感影像' % (year, mm, dd)
        label2 = '图2   %d年%d月%d日太湖蓝藻遥感监测' % (year, mm, dd)
        label3 = '图3   %d年%d月%d日太湖蓝藻聚集强度分级' % (year, mm, dd)
        templateID = 3
        typeID = 5
    # 9.有云有藻 面积大于300 有高中低聚集区
    elif 5 < cloud < 95 and totalArea > 300 and areaH > 0 and areaM > 0 and areaL > 0:
        description = '%sEOS/MODIS卫星遥感影像显示(图1),太湖部分湖区被云层覆盖,无云区域内发现蓝藻聚集面积约%d平方千米(图2),' \
                      '占全湖总面积的%.1f%%,主要分布在%s。其中,高、中、低聚集区面积分别约为%d平方千米、%d平方千米和%d平方千米,' \
                      '占蓝藻总聚集面积的%d%%、%d%%和%d%%(表1、图3)。按行政边界划分,无锡水域%d平方千米,占%d%%;' \
                      '常州水域%d平方千米,占%d%%;苏州水域%d平方千米,占%d%%。' \
                      % (timeStr, totalArea, totalPercent, lakeRegionStr, areaH, areaM, areaL, percentH, percentM,
                         percentL, areaWX, percentWX, areaCZ, percentCZ, areaSZ, percentSZ)
        description2 = '%sEOS/MODIS卫星遥感影像显示,太湖部分湖区被云层覆盖,无云区域内发现蓝藻聚集面积约%d平方千米,' \
                       '占全湖总面积的%.1f%%,主要分布在%s。其中,高、中、低聚集区面积分别约为%d平方千米、%d平方千米和%d平方千米,' \
                       '占蓝藻总聚集面积的%d%%、%d%%和%d%%。按行政边界划分,无锡水域%d平方千米,占%d%%;' \
                       '常州水域%d平方千米,占%d%%;苏州水域%d平方千米,占%d%%。' \
                       % (timeStr, totalArea, totalPercent, lakeRegionStr, areaH, areaM, areaL, percentH, percentM,
                          percentL, areaWX, percentWX, areaCZ, percentCZ, areaSZ, percentSZ)
        label1 = '图1   %d年%d月%d日太湖区域卫星遥感影像' % (year, mm, dd)
        label2 = '图2   %d年%d月%d日太湖蓝藻遥感监测' % (year, mm, dd)
        label3 = '图3   %d年%d月%d日太湖蓝藻聚集强度分级' % (year, mm, dd)
        templateID = 3
        typeID = 4
    # 10.有云有藻 面积大于300 无高聚集区 有中低聚集区
    elif 5 < cloud < 95 and totalArea > 300 and areaH == 0 and areaM > 0 and areaL > 0:
        description = '%sEOS/MODIS卫星遥感影像显示(图1),太湖部分湖区被云层覆盖,无云区域内发现蓝藻聚集面积约%d平方千米(图2),' \
                      '占全湖总面积的%.1f%%,主要分布在%s。其中,无高聚集区,中、低聚集区面积约分别约为%d平方千米和%d平方千米,' \
                      '占蓝藻总聚集面积的%d%%和%d%%(表1、图3)。按行政边界划分,无锡水域%d平方千米,占%d%%;常州水域%d平方千米,' \
                      '占%d%%;苏州水域%d平方千米,占%d%%。' \
                      % (timeStr, totalArea, totalPercent, lakeRegionStr, areaM, areaL, percentM, percentL, areaWX,
                         percentWX, areaCZ, percentCZ, areaSZ, percentSZ)
        description2 = '%sEOS/MODIS卫星遥感影像显示,太湖部分湖区被云层覆盖,无云区域内发现蓝藻聚集面积约%d平方千米,' \
                       '占全湖总面积的%.1f%%,主要分布在%s。其中,无高聚集区,中、低聚集区面积约分别约为%d平方千米和%d平方千米,' \
                       '占蓝藻总聚集面积的%d%%和%d%%。按行政边界划分,无锡水域%d平方千米,占%d%%;常州水域%d平方千米,' \
                       '占%d%%;苏州水域%d平方千米,占%d%%。' \
                       % (timeStr, totalArea, totalPercent, lakeRegionStr, areaM, areaL, percentM, percentL, areaWX,
                          percentWX, areaCZ, percentCZ, areaSZ, percentSZ)
        label1 = '图1   %d年%d月%d日太湖区域卫星遥感影像' % (year, mm, dd)
        label2 = '图2   %d年%d月%d日太湖蓝藻遥感监测' % (year, mm, dd)
        label3 = '图3   %d年%d月%d日太湖蓝藻聚集强度分级' % (year, mm, dd)
        templateID = 3
        typeID = 4
    # 11.有云有藻 面积大于300 无高中聚集区 有低聚集区
    elif 5 < cloud < 95 and totalArea > 300 and areaH == 0 and areaM == 0 and areaL > 0:
        description = '%sEOS/MODIS卫星遥感影像显示(图1),太湖部分湖区被云层覆盖,无云区域内发现蓝藻聚集面积约%d平方千米(图2),' \
                      '占全湖总面积的%.1f%%,主要分布在%s,全部为低聚集区(表1、图3)。按行政边界划分,无锡水域%d平方千米,占%d%%;' \
                      '常州水域%d平方千米,占%d%%;苏州水域%d平方千米,占%d%%。' \
                      % (timeStr, totalArea, totalPercent, lakeRegionStr, areaWX, percentWX, areaCZ, percentCZ, areaSZ,
                         percentSZ)
        description2 = '%sEOS/MODIS卫星遥感影像显示,太湖部分湖区被云层覆盖,无云区域内发现蓝藻聚集面积约%d平方千米,' \
                       '占全湖总面积的%.1f%%,主要分布在%s,全部为低聚集区。按行政边界划分,无锡水域%d平方千米,占%d%%;' \
                       '常州水域%d平方千米,占%d%%;苏州水域%d平方千米,占%d%%。' \
                       % (timeStr, totalArea, totalPercent, lakeRegionStr, areaWX, percentWX, areaCZ, percentCZ, areaSZ,
                          percentSZ)
        label1 = '图1   %d年%d月%d日太湖区域卫星遥感影像' % (year, mm, dd)
        label2 = '图2   %d年%d月%d日太湖蓝藻遥感监测' % (year, mm, dd)
        label3 = '图3   %d年%d月%d日太湖蓝藻聚集强度分级' % (year, mm, dd)
        templateID = 3
        typeID = 4
    else:
        print('No Match Found!!!')
        return
    print(description)

    # 生成文件====================================================
    # 1.生成日报
    replaceText = {'year': year, 'num': num, 'mm': mm, 'dd': dd, 'description': description, 'label1': label1,
                   'label2': label2, 'label3': label3, 'areaH': areaH, 'areaM': areaM, 'areaL': areaL,
                   'totalArea': totalArea, 'percentH': percentH, 'percentM': percentM, 'percentL': percentL}
    dependDir = globalCfg['depend_path']
    templateDir = os.path.join(dependDir, 'word')
    templatePath = os.path.join(templateDir, 'report_daily' + str(templateID) + '.docx')
    tpl = DocxTemplate(templatePath)
    tpl.render(replaceText)

    jsonBaseName = os.path.basename(jsonPath)
    outputDir = os.path.dirname(jsonPath)
    outWordName = jsonBaseName.replace('.json', '.docx')
    outWordPath = os.path.join(outputDir, outWordName)

    picturePath1 = os.path.join(outputDir, jsonBaseName.replace('.json', '_reportImg1_noPoints.jpg'))
    picturePath2 = os.path.join(outputDir, jsonBaseName.replace('.json', '_reportImg2.jpg'))
    picturePath3 = os.path.join(outputDir, jsonBaseName.replace('.json', '_reportImg3.jpg'))
    if not (os.path.exists(picturePath1) and os.path.exists(picturePath2) and os.path.exists(picturePath2)):
        print('Cannot Find JPG File!!!')
        return
    if templateID == 1:
        replacePic = {"template_picture1.jpg": picturePath1}
    elif templateID == 2:
        replacePic = {"template_picture1.jpg": picturePath1, "template_picture2.jpg": picturePath2}
    elif templateID == 3:
        replacePic = {"template_picture1.jpg": picturePath1, "template_picture2.jpg": picturePath2,
                      "template_picture3.jpg": picturePath3}
    else:
        replacePic = {}
    for key in replacePic.keys():
        tpl.replace_pic(key, replacePic[key])
    if os.path.exists(outWordPath):
        os.remove(outWordPath)
    tpl.save(outWordPath)

    # 2.生成推送所需txt第一段文字,剩余两段文字后续添加
    outTxtName = jsonBaseName.replace('.json', '.txt')
    outTxtPath = os.path.join(outputDir, outTxtName)
    with open(outTxtPath, 'w') as f:
        f.write(description2)

    # 3.生成EXCEL
    xls_num = num
    xls_date = str(year) + '/' + str(mm) + '/' + str(dd)
    xls_time = '%s时%s分' % (str(hour), str(minute))
    xls_threshold = str(algaeThreshold)
    xls_ndviMax = str(jsonData['ndviMax'])
    xls_ndviMin = str(jsonData['ndviMin'])
    xls_ndviMean = str(jsonData['ndviMean'])
    xls_boundary = str(jsonData['boundaryThreshold'])
    xls_area = ''
    if typeID == 4 or typeID == 5:
        xls_area = str(totalArea)
    xls_algae_area = ''
    if typeID == 2 or typeID == 3:
        xls_algae_area = '0'
    elif typeID == 4 or typeID == 5:
        xls_algae_area = str(totalArea)
    xls_high = ''
    if totalArea >= 300 and areaH > 0:
        xls_high = str(areaH)
    xls_mid = ''
    if totalArea >= 300 and areaM > 0:
        xls_mid = str(areaM)
    xls_low = ''
    if totalArea >= 300 and areaL > 0:
        xls_low = str(areaL)
    xls_region = lakeRegionStr
    xls_cloud = str(cloud)
    if cloud > 50 and totalArea == 0:
        xls_activate = '0'
    else:
        xls_activate = '1'
    xls_explain = str(typeID)
    xls_weather = ''
    if cloud <= 5:
        xls_cloud_cover = '无覆盖'
    elif cloud >= 95:
        xls_cloud_cover = '全部覆盖'
    else:
        xls_cloud_cover = '部分覆盖'
    xls_total_percent = '%.2f%%' % totalPercent
    xls_intensity_threshold = ''
    if totalArea >= 300:
        xls_intensity_threshold = '%.3f-%.3f,%.3f-%.3f,%.3f-%.3f' \
                                  % (jsonData['ndviMin'], jsonData['insThreshold1'], jsonData['insThreshold1'],
                                     jsonData['insThreshold2'], jsonData['insThreshold2'], jsonData['ndviMax'])
    outXlsxName = jsonBaseName.replace('.json', '.xlsx')
    outXlsxPath = os.path.join(outputDir, outXlsxName)
    if os.path.exists(outXlsxPath):
        os.remove(outXlsxPath)
    workBook = xlsxwriter.Workbook(outXlsxPath)
    sheet = workBook.add_worksheet()
    writeTable = {'A1': '报告期数', 'A2': xls_num,
                  'B1': '日期', 'B2': xls_date,
                  'C1': '时间', 'C2': xls_time,
                  'D1': 'NDVI阈值', 'D2': xls_threshold,
                  'E1': 'NDVI最大值(蓝藻区域)', 'E2': xls_ndviMax,
                  'F1': 'NDVI最小值(蓝藻区域)', 'F2': xls_ndviMin,
                  'G1': 'NDVI均值(蓝藻区域)', 'G2': xls_ndviMean,
                  'H1': '边界缩放', 'H2': xls_boundary,
                  'I1': '面积(km2)无云无藻不填', 'I2': xls_area,
                  'J1': '蓝藻面积(无云无藻填0,全云不填,其他按面积填)', 'J2': xls_algae_area,
                  'K1': '高聚区面积', 'K2': xls_high,
                  'L1': '中聚区面积', 'L2': xls_mid,
                  'M1': '低聚区面积', 'M2': xls_low,
                  'N1': '分布范围(竺山湖、梅梁湖、贡湖、西部沿岸、南部沿岸、东部沿岸和湖心区)', 'N2': xls_region,
                  'O1': '云量', 'O2': xls_cloud,
                  'P1': '是否为有效监测(云量超过50%并且没有监测到蓝藻算无效,1为有效,0为无效)', 'P2': xls_activate,
                  'Q1': '说明(1全云;2有云无藻;3无云无藻;4有云有藻;5无云有藻)', 'Q2': xls_explain,
                  'R1': '天气', 'R2': xls_weather,
                  'S1': '是否被云覆盖(无覆盖、全部覆盖、部分覆盖)', 'S2': xls_cloud_cover,
                  'T1': '水华面积百分比', 'T2': xls_total_percent,
                  'U1': 'NDVI分级阈值', 'U2': xls_intensity_threshold
                  }
    format1 = workBook.add_format({'align': 'center', 'font_size': 10,
                                   'valign': 'vcenter', 'text_wrap': 1})
    for key in writeTable.keys():
        sheet.write(key, writeTable[key], format1)
    sheet.set_row(0, 60)
    sheet.set_column('A:M', 8.85)
    sheet.set_column('N:N', 73)
    sheet.set_column('P:P', 73)
    sheet.set_column('Q:Q', 60)
    sheet.set_column('S:S', 44)
    sheet.set_column('T:T', 15)
    sheet.set_column('U:U', 40)
    workBook.close()

    # 4.生成EXCEL_WX
    writeTable2 = {'A1': '报告期数', 'A2': xls_num,
                   'B1': '日期', 'B2': xls_date,
                   'C1': '时间', 'C2': xls_time,
                   'D1': 'NDVI阈值', 'D2': xls_threshold,
                   'E1': '边界缩放', 'E2': xls_boundary,
                   'F1': '面积(km2)无云无藻不填', 'F2': xls_area,
                   }
    outXlsxWxName = jsonBaseName.replace('.json', '_wx.xlsx')
    outXlsxWxPath = os.path.join(outputDir, outXlsxWxName)
    if os.path.exists(outXlsxWxPath):
        os.remove(outXlsxWxPath)
    workBook2 = xlsxwriter.Workbook(outXlsxWxPath)
    sheet2 = workBook2.add_worksheet()
    format2 = workBook2.add_format({'align': 'center', 'font_size': 10,
                                   'valign': 'vcenter', 'text_wrap': 1})
    for key in writeTable2.keys():
        sheet2.write(key, writeTable2[key], format2)
    sheet.set_row(0, 60)
    sheet.set_column('A:F', 9)
    workBook2.close()

    # 转pdf供前端查看 Windows无法测试===================================================
    outPdfDir = os.path.join(globalCfg['taihu_report_remote'], issue[0:8])
    if not os.path.exists(outPdfDir):
        os.makedirs(outPdfDir)
    cmdStr = 'libreoffice6.3 --headless --convert-to pdf:writer_pdf_Export ' + outWordPath + ' --outdir ' + outPdfDir
    print(cmdStr)
    try:
        os.system(cmdStr)
        print('Convert PDF Success.')
    except Exception as e:
        print(e)

    # 信息入库=========================================================
    conn = pymysql.connect(
        db=globalCfg['database'],
        user=globalCfg['database_user'],
        password=globalCfg['database_passwd'],
        host=globalCfg['database_host'],
        port=globalCfg['database_port']
    )

    # t_water_report_taihu
    cursor = conn.cursor()
    algaeTifName = os.path.basename(jsonPath).replace('.json', '.tif')
    db_uuid = str(uuid.uuid4())
    db_date = issue[0:4] + '-' + issue[4:6] + '-' + issue[6:8]
    db_number = str(num)
    db_description = description
    db_label1 = ''
    db_label2 = ''
    db_label3 = ''
    db_image1 = ''
    db_image2 = ''
    db_image3 = ''
    db_high_area = ''
    db_mid_area = ''
    db_low_area = ''
    db_total_area = ''
    db_high_percent = ''
    db_mid_percent = ''
    db_low_percent = ''
    db_total_percent = ''
    db_image = algaeTifName
    db_title = '太湖蓝藻水华卫星遥感监测日报'
    db_time_modify = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    if templateID == 1:
        db_label1 = label1
        db_image1 = picturePath1.replace('\\', '/').replace('/mnt/resource/', '')
    elif templateID == 2:
        db_label1 = label1
        db_image1 = picturePath1.replace('\\', '/').replace('/mnt/resource/', '')
        db_label2 = label2
        db_image2 = picturePath2.replace('\\', '/').replace('/mnt/resource/', '')
    elif templateID == 3:
        db_label1 = label1
        db_image1 = picturePath1.replace('\\', '/').replace('/mnt/resource/', '')
        db_label2 = label2
        db_image2 = picturePath2.replace('\\', '/').replace('/mnt/resource/', '')
        db_label3 = label3
        db_image3 = picturePath3.replace('\\', '/').replace('/mnt/resource/', '')
        db_high_area = str(areaH)
        db_mid_area = str(areaM)
        db_low_area = str(areaL)
        db_total_area = str(totalArea)
        db_total_percent = '100.0'
        db_high_percent = str(percentH)
        db_mid_percent = str(percentM)
        db_low_percent = str(percentL)
    else:
        pass
    # 查找是否已存在
    sqlStr = 'SELECT * FROM ' + globalCfg['database_table_report_taihu'] + \
             ' WHERE image=%s and is_deleted=0;'
    cursor.execute(sqlStr, algaeTifName)
    sqlRes = cursor.fetchall()
    if len(sqlRes) > 0:
        # 更新
        sqlStr = 'UPDATE ' + globalCfg['database_table_report_taihu'] + \
            ' SET date=%s,number=%s,description=%s,image1=%s,image2=%s,image3=%s,label1=%s,label2=%s,label3=%s,' \
            'high_area=%s,mid_area=%s,low_area=%s,total_area=%s,high_percent=%s,mid_percent=%s,low_percent=%s,' \
            'total_percent=%s,title=%s,time_modify=%s WHERE image=%s;'
        sqlData = (db_date, db_number, db_description, db_image1, db_image2, db_image3, db_label1, db_label2, db_label3,
                   db_high_area, db_mid_area, db_low_area, db_total_area, db_high_percent, db_mid_percent,
                   db_low_percent, db_total_percent, db_title, db_time_modify, db_image)
        cursor.execute(sqlStr, sqlData)
        conn.commit()
    else:
        # 插入
        sqlStr = 'INSERT INTO ' + globalCfg['database_table_report_taihu'] + \
                 ' (uuid,date,number,description,image1,image2,image3,label1,label2,label3,high_area,' \
                 'mid_area,low_area,total_area,high_percent,mid_percent,low_percent,total_percent,is_deleted,' \
                 'is_default,image,title,time_modify) VALUES ' \
                 '(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);'
        sqlData = (db_uuid, db_date, db_number, db_description, db_image1, db_image2, db_image3, db_label1, db_label2,
                   db_label3, db_high_area, db_mid_area, db_low_area, db_total_area, db_high_percent,
                   db_mid_percent, db_low_percent, db_total_percent, 0, 0, db_image, db_title, db_time_modify)
        cursor.execute(sqlStr, sqlData)
        conn.commit()

    # t_water_taihu_modis
    # 查找是否已存在
    sqlStr = 'SELECT * FROM ' + globalCfg['database_table_report_taihu_info'] + \
             ' WHERE image_uuid=%s;'
    cursor.execute(sqlStr, productUuid)
    sqlRes = cursor.fetchall()
    db_date = '%s-%s-%s %s:%s' % (issue[0:4], issue[4:6], issue[6:8], issue[8:10], issue[10:12])
    regionArea = jsonData['regionArea']
    area_zsh = str(regionArea['zhushanhu'])
    area_mlh = str(regionArea['meilianghu'])
    area_gh = str(regionArea['gonghu'])
    area_xbya = str(regionArea['westCoast'])
    area_nbya = str(regionArea['southCoast'])
    area_hxq = str(regionArea['centerLake'])
    area_dbya = str(regionArea['eastCoast'])
    area_dth = str(regionArea['eastTaihu'])
    db_region_area = ','.join([area_zsh, area_mlh, area_gh, area_xbya, area_nbya, area_hxq, area_dbya, area_dth])
    if len(sqlRes) > 0:
        # 更新
        sqlStr = 'UPDATE ' + globalCfg['database_table_report_taihu_info'] + \
                 ' SET number=%s,date=%s,ndvi_threshold=%s,ndvi_max=%s,ndvi_min=%s,ndvi_mean=%s,boundary=%s,area=%s,' \
                 'region_area=%s,high_area=%s,mid_area=%s,low_area=%s,cloud=%s,type=%s,is_activate=%s,ndvi_grade=%s ' \
                 'WHERE image_uuid=%s;'
        sqlData = (xls_num, db_date, xls_threshold, xls_ndviMax, xls_ndviMin, xls_ndviMean, xls_boundary,
                   str(totalArea), db_region_area, str(areaH), str(areaM), str(areaL), xls_cloud, xls_explain,
                   xls_activate, xls_intensity_threshold, productUuid)
        cursor.execute(sqlStr, sqlData)
        conn.commit()
    else:
        sqlStr = 'INSERT INTO ' + globalCfg['database_table_report_taihu_info'] + \
                 ' (number,date,ndvi_threshold,ndvi_max,ndvi_min,ndvi_mean,boundary,area,region_area,high_area,' \
                 'mid_area,low_area,cloud,type,is_activate,ndvi_grade,image_uuid) ' \
                 'VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);'
        sqlData = (xls_num, db_date, xls_threshold, xls_ndviMax, xls_ndviMin, xls_ndviMean, xls_boundary,
                   str(totalArea), db_region_area, str(areaH), str(areaM), str(areaL), xls_cloud, xls_explain,
                   xls_activate, xls_intensity_threshold, productUuid)
        cursor.execute(sqlStr, sqlData)
        conn.commit()

    # 更新t_export_image信息
    sqlStr = 'SELECT * FROM ' + globalCfg['database_table_export_image'] + \
             ' WHERE uuid=%s and is_deleted=0;'
    cursor.execute(sqlStr, productUuid)
    sqlRes = cursor.fetchall()
    if len(sqlRes) > 0:
        sqlStr = 'UPDATE ' + globalCfg['database_table_export_image'] + \
            ' SET area=%s,threshold=%s WHERE uuid=%s;'
        sqlData = (totalArea, algaeThreshold, productUuid)
        cursor.execute(sqlStr, sqlData)
        conn.commit()
    else:
        pass

    cursor.close()
    conn.close()

    # 更新切片==============================================================
    tileDict = {}
    basename = '_'.join(jsonBaseName.split('_')[0:7])
    # 1.蓝藻产品切片
    algaeTifPath = os.path.join(outputDir, jsonBaseName.replace('.json', '.tif'))
    algaeTifRender = os.path.join(outputDir, jsonBaseName.replace('.json', '_render.tif'))
    colorTable = {1: (255, 251, 0)}
    UniqueValues.Render(algaeTifPath, colorTable, returnMode='GEOTIFF', outputPath=algaeTifRender, isAlpha=True)
    tileDict['taihu_algae_ndvi'] = {'tif': algaeTifRender, 'name': basename + '_taihu_algae_ndvi',
                                    'legendType': '1', 'legendColor': [(255, 251, 0)], 'legendName': ['水华']}

    # 2.蓝藻强度产品切片
    intensityTifPath = os.path.join(outputDir, jsonBaseName.replace('_ndvi.json', '_intensity.tif'))
    intensityTifRender = os.path.join(outputDir, jsonBaseName.replace('_ndvi.json', '_intensity_render.tif'))
    colorTable = {1: (0, 255, 102), 2: (255, 255, 0), 3: (255, 153, 0)}
    UniqueValues.Render(intensityTifPath, colorTable, returnMode='GEOTIFF', outputPath=intensityTifRender, isAlpha=True)
    tileDict['algaeClassify'] = {'tif': intensityTifRender, 'name': basename + '_classify', 'legendType': '1',
                                 'legendColor': [(0, 255, 102), (255, 255, 0), (255, 153, 0)],
                                 'legendName': ['轻度', '中度', '重度']}

    # 调用gdal2tiles工具进行切片
    pythonPath = globalCfg['python_path']
    gdal2tilesPath = globalCfg['gdal2tiles_path']
    tileOutRootDir = globalCfg['tile_server_path']
    for key in tileDict.keys():
        tileTif = tileDict[key]['tif']
        tileOutDir = os.path.join(tileOutRootDir, tileDict[key]['name'])
        if os.path.exists(tileOutDir):
            shutil.rmtree(tileOutDir)
        cmd = '%s %s -z %s -w all %s %s' % (pythonPath, gdal2tilesPath, TILE_LEVEL, tileTif, tileOutDir)
        os.system(cmd)
        os.remove(tileTif)
        tileDict[key]['path'] = tileOutDir
Пример #27
0
from docxtpl import DocxTemplate, RichText

# Fill the checkbox-control template with the control left unchecked and
# write the rendered document to the output directory.
template = DocxTemplate('templates/check-control.docx')
render_context = {'checked': False}
template.render(render_context)
template.save('output/check-control.docx')
Пример #28
0
#!/usr/bin/env python

from docxtpl import DocxTemplate
import jinja2
import subprocess

# Render the invitation template with a custom Jinja2 environment, then
# convert a .docx to PDF via unoconv.
doc = DocxTemplate("invite.docx")
context = {'title': "Lord Commander", 'name': "John Snow"}
jinja_env = jinja2.Environment()
# jinja_env.filters['myfilter'] = myfilterfunc
doc.render(context, jinja_env)
filename = "JohnSnow.docx"
doc.save(filename)

# unoconv -f pdf invite.docx
# NOTE(review): this converts the source template "invite.docx", not the
# rendered "JohnSnow.docx" — confirm that is the intended input.
out = subprocess.check_output(['/usr/bin/python3', '/usr/bin/unoconv', '-f', 'pdf', 'invite.docx'])
# fix: was the Python-2 statement `print out`, a SyntaxError on Python 3
print(out)
from docxtpl import DocxTemplate, RichText
from jinja2.exceptions import TemplateError
import six

# Deliberately render a template that contains a Jinja2 error to exercise
# docxtpl's error reporting; the printed TemplateError is expected output.
six.print_('=' * 80)
six.print_("Generating template error for testing (so it is safe to ignore) :")
six.print_('.' * 80)
try:
    tpl = DocxTemplate('test_files/template_error_tpl.docx')
    tpl.render({'test_variable': 'test variable value'})
except TemplateError as the_error:
    six.print_(six.text_type(the_error))
    if hasattr(the_error, 'docx_context'):
        # fix: was the Python-2 statement `print "Context:"` — a SyntaxError
        # on Python 3 and inconsistent with six.print_ used everywhere else.
        six.print_("Context:")
        for line in the_error.docx_context:
            six.print_(line)
tpl.save('test_files/template_error.docx')
six.print_('.' * 80)
six.print_(" End of TemplateError Test ")
six.print_('=' * 80)
Пример #30
0
def get_pubs_by_docx(request, type):
    """Generate the current user's publication list as a .docx attachment.

    Publications authored by ``request.user`` are collected from the
    ``Publications`` model, grouped by kind (Scopus/WoS articles, VAK
    articles, other articles and theses, monographs, textbooks, patents),
    sorted chronologically, renumbered and rendered into
    ``template_pubs.docx``.

    :param request: Django request; ``request.user`` must expose
        ``last_name``, ``first_name``, ``patronymic``, ``get_fullname()``
        and ``deparmt``.
    :param type: reporting period, ``'5year'`` or ``'allyear'`` (parameter
        name kept for caller compatibility although it shadows the builtin).
    :returns: ``HttpResponse`` streaming the rendered document.
    :raises ValueError: on an unknown ``type`` value (previously this fell
        through and crashed later with ``NameError``).
    """
    if type == '5year':
        year_search = timezone.now().year - 5
        promejutok = 'за последние 5 лет'
    elif type == 'allyear':
        year_search = 1920
        promejutok = 'за все время'
    else:
        # fix: the original left year_search/promejutok unbound here.
        raise ValueError('Unknown period type: {0!r}'.format(type))

    article_scopus = []
    article_vak = []
    article_rinc = []
    patents = []
    monograf = []
    ucheb_posobie = []

    def get_vol_in_pages(edition_info, authors):
        """Return "pages/per-author-share" for one publication entry."""
        is_article = (
            re.search('статья в журнале - научная статья', edition_info['type']) or
            re.search('Article', edition_info['type']) or
            re.search('тезисы доклада на конференции', edition_info['type']) or
            re.search('статья в сборнике трудов конференции', edition_info['type']))
        authors_cnt = len(authors.split(','))
        if is_article:
            pgs = str(edition_info['pages']).split("-")
            if len(pgs) > 1:
                # Page range "a-b": report the span and the per-author share.
                raznost_pgs = int(pgs[1]) - int(pgs[0])
                res = "{0}/{1:.2f}".format(raznost_pgs, raznost_pgs / authors_cnt)
            else:
                res = "{0}/{1:.2f}".format(edition_info['pages'], 1 / authors_cnt)
        else:
            pages_cnt = edition_info['pages'] if 'pages' in edition_info else 1
            res = "{0}/{1:.2f}".format(pages_cnt, int(pages_cnt) / authors_cnt)
        return res

    def add_entry(target, title, biblio_info, vol_in_page, co_authors, year):
        """Append a uniformly shaped row; 'id' is renumbered later by set_ids."""
        target.append({
            'id': len(target) + 1,
            'title': title,
            'biblio_info': biblio_info,
            'vol_in_page': vol_in_page,
            'co_authors': co_authors,
            'year': year,
        })

    for item in Publications.objects.all():
        # Only publications where the user appears in the author list.
        if not re.search(pytils.translit.translify(request.user.last_name),
                         pytils.translit.translify(item.authors)):
            continue
        if item.year < year_search:
            continue

        # SECURITY: edition_info is a Python literal stored in the DB and is
        # executed with eval(); prefer ast.literal_eval or JSON storage.
        edition_info = eval(item.edition_info)

        # Drop the author from the co-author list. (fix: the original removed
        # elements while iterating the same list, which skips the element
        # that follows each removed one.)
        co_authors = ",".join(
            it for it in str(item.authors).split(",")
            if not (re.search(request.user.get_fullname(), it) or
                    re.search(pytils.translit.translify(request.user.last_name), it)))

        title = "{0} ({1})".format(item.title, edition_info['type'])
        vol_in_page = get_vol_in_pages(edition_info, item.authors)

        if edition_info['type'] == 'патент на изобретение':
            add_entry(patents, title,
                      "Номер патента: {0}. {1}г. {2}".format(
                          edition_info['Номер патента'], item.year,
                          edition_info['Страна']),
                      vol_in_page, co_authors, item.year)
        elif edition_info['type'] == 'монография':
            add_entry(monograf, title,
                      "{0}. {1}. {2} с. ISBN:{3}".format(
                          edition_info['edition'], item.year,
                          edition_info['pages'], edition_info['isbn']),
                      vol_in_page, co_authors, item.year)
        elif edition_info['type'] == 'учебное пособие':
            add_entry(ucheb_posobie, title,
                      "{0}. {1}. {2} с. ISBN:{3}".format(
                          edition_info['edition'], item.year,
                          edition_info['pages'], edition_info['isbn']),
                      vol_in_page, co_authors, item.year)
        elif (re.search('статья в журнале - научная статья', str(edition_info['type'])) or
              re.search('Article', str(edition_info['type']))):
            if item.isScopusWoS:
                vol = ('V.' + edition_info['volume']
                       if edition_info['volume'] is not None else '')
                add_entry(article_scopus, title,
                          '// {0}. {1}. {2} №{3}. P.{4}.'.format(
                              str(edition_info['name']).title(), item.year, vol,
                              edition_info['number'], edition_info['pages']),
                          vol_in_page, co_authors, item.year)
            else:
                format_rus = ("//{0}. {1}. {2} №{3}. С.{4}."
                              if edition_info['lang'] == 'русский'
                              else "//{0}. {1}. {2} №{3}. P.{4}.")
                vol = ''  # fix: was unbound when the 'volume' key is absent
                if edition_info.get('volume'):
                    vol = ('Т.{0}.' if edition_info['lang'] == 'русский'
                           else 'V.{0}.').format(edition_info['volume'])
                add_entry(article_vak, title,
                          format_rus.format(
                              str(edition_info['name']).title(), item.year, vol,
                              edition_info['number'], edition_info['pages']),
                          vol_in_page, co_authors, item.year)
        elif (re.search('тезисы доклада на конференции', str(edition_info['type'])) or
              re.search('статья в сборнике трудов конференции', str(edition_info['type']))):
            format_rus = ("//{0}. {1}. С.{2}." if edition_info['lang'] == 'русский'
                          else "{0}. {1}. P.{2}.")
            add_entry(article_rinc, title,
                      format_rus.format(
                          str(edition_info['name']).title(),
                          edition_info['conference'], edition_info['pages']),
                      vol_in_page, co_authors, item.year)
        elif re.search('диссертация', str(edition_info['type'])):
            add_entry(article_rinc, title,
                      "// {0}. {1}. {2}c.".format(
                          str(edition_info['type']).title(), item.year,
                          edition_info['pages']),
                      vol_in_page, co_authors, item.year)
        else:
            print('Пропущена публикация: ', edition_info['type'], ' ',
                  item.authors, '  ', item.title, '  ', item.isScopusWoS)

    # Chronological order inside every group.
    for group in (article_scopus, article_vak, article_rinc,
                  ucheb_posobie, monograf, patents):
        group.sort(key=lambda a: a['year'])

    def set_ids(arr):
        """Renumber the 'id' field 1..N after sorting."""
        for pos, row in enumerate(arr, start=1):
            row['id'] = pos
        return arr

    article_scopus = set_ids(article_scopus)
    article_vak = set_ids(article_vak)
    article_rinc = set_ids(article_rinc)
    ucheb_posobie = set_ids(ucheb_posobie)
    monograf = set_ids(monograf)  # fix: monographs were never renumbered
    patents = set_ids(patents)

    # Render the document into a temporary file and stream it back.
    zaf_kav = generator_core.get_zaf_kaf(request.user.deparmt)
    rp_file_object = tempfile.NamedTemporaryFile(suffix='.docx')
    document = DocxTemplate(
        os.path.join(os.path.join(settings.BASE_DIR, "static/doc_templ"),
                     'template_pubs.docx'))
    document.render(
        context={
            'author': {
                'full_name': '{0} {1} {2}'.format(request.user.last_name,
                                                  request.user.first_name,
                                                  request.user.patronymic),
                'IO_family': request.user.get_fullname(),
            },
            'zav_kaf': {
                'position': zaf_kav['position'],
                'name': zaf_kav['name'],
            },
            'promejutok': promejutok,
            'year': timezone.now().year,
            'tbl_article_scopus': article_scopus,
            'tbl_article_vak': article_vak,
            'tbl_article_other': article_rinc,
            'tbl_monografia': monograf,
            'tbl_ucheb_posobiya_UMO': '',  # TODO: not implemented yet
            'tbl_ucheb_posobiya': ucheb_posobie,
            'tbl_ucheb_meth_izdanya': '',  # TODO: not implemented yet
            'tbl_patents': patents,
        })
    document.save(rp_file_object.name)

    res = HttpResponse(
        rp_file_object.file,
        content_type=
        'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
    )
    res['Content-Disposition'] = 'attachment; filename=result.docx'
    res['Content-Length'] = os.path.getsize(rp_file_object.name)
    return res
def generate_electrical_docx(project_chapter6_type, args):
    # **********************************************
    print("*" * 30)
    # step:1
    # 载入参数
    print("---------step:1  载入参数--------")
    #  chapter 6
    Dict_6 = {}
    # project_chapter6_type = ['山地']
    # args=[19, 22, 8, 1.5, 40, 6]
    project01 = WireRod(project_chapter6_type, *args)
    project01.aluminium_cable_steel_reinforced("LGJ_240_30")
    args_chapter6_01_name = ['钢芯铝绞线']
    args_chapter6_01_type = ['LGJ_240_30']

    for i in range(0, len(args_chapter6_01_name)):
        if args_chapter6_01_name[i] == '钢芯铝绞线':
            print("---------线材:钢芯铝绞线--------")
            key_dict = args_chapter6_01_type[i]
            if key_dict == 'LGJ_240_30':
                value_dict = str(
                    project01.aluminium_cable_steel_reinforced_length_weight)
                Dict_6[key_dict] = value_dict
    print("---------线材生成完毕--------")

    electrical_insulator_name_list = ['复合绝缘子', '瓷绝缘子', '复合针式绝缘子', '复合外套氧化锌避雷器']
    electrical_insulator_type_list = [
        'FXBW4_35_70', 'U70BP_146D', 'FPQ_35_4T16', 'YH5WZ_51_134'
    ]

    tower_type_list = [
        '单回耐张塔', '单回耐张塔', '单回耐张塔', '单回直线塔', '单回直线塔', '双回耐张塔', '双回耐张塔', '双回直线塔',
        '双回直线塔', '铁塔电缆支架'
    ]
    tower_type_high_list = [
        'J2_24', 'J4_24', 'FS_18', 'Z2_30', 'ZK_42', 'SJ2_24', 'SJ4_24',
        'SZ2_30', 'SZK_42', '角钢'
    ]
    tower_weight_list = [
        6.8,
        8.5,
        7,
        5.5,
        8.5,
        12.5,
        17,
        6.5,
        10,
        0.5,
    ]
    tower_height_list = [32, 32, 27, 37, 49, 37, 37, 42, 54, 0]
    tower_foot_distance_list = [5.5, 5.5, 6, 5, 6, 7, 8, 6, 8, 0]

    project_chapter6_type = ['山地']
    project02 = ElectricalInsulator(project_chapter6_type, *args)
    project02.sum_cal_tower_type(tower_type_list, tower_type_high_list,
                                 tower_weight_list, tower_height_list,
                                 tower_foot_distance_list)
    project02.electrical_insulator_model(project_chapter6_type,
                                         electrical_insulator_name_list,
                                         electrical_insulator_type_list)

    args_chapter6_02_type = electrical_insulator_type_list

    for i in range(0, len(args_chapter6_02_type)):
        key_dict = args_chapter6_02_type[i]
        if key_dict == 'FXBW4_35_70':
            value_dict = str(project02.used_numbers_FXBW4_35_70)
            Dict_6[key_dict] = value_dict
        if key_dict == 'U70BP_146D':
            value_dict = str(project02.used_numbers_U70BP_146D)
            Dict_6[key_dict] = value_dict
        if key_dict == 'FPQ_35_4T16':
            value_dict = str(project02.used_numbers_FPQ_35_4T16)
            Dict_6[key_dict] = value_dict
        if key_dict == 'YH5WZ_51_134':
            value_dict = str(project02.used_numbers_YH5WZ_51_134)
            Dict_6[key_dict] = value_dict

    print("---------绝缘子生成完毕--------")

    args_chapter6_03_type = tower_type_high_list
    project03 = TowerType(project_chapter6_type, *args)
    project03.sum_cal_tower_type(tower_type_list, tower_type_high_list,
                                 tower_weight_list, tower_height_list,
                                 tower_foot_distance_list)

    for i in range(0, len(args_chapter6_03_type)):
        key_dict = args_chapter6_03_type[i]
        if key_dict == 'J2_24':
            value_dict = str(project03.used_numbers_single_J2_24)
            Dict_6[key_dict] = value_dict
        if key_dict == 'J4_24':
            value_dict = str(project03.used_numbers_single_J4_24)
            Dict_6[key_dict] = value_dict
        if key_dict == 'FS_18':
            value_dict = str(project03.used_numbers_single_FS_18)
            Dict_6[key_dict] = value_dict
        if key_dict == 'Z2_30':
            value_dict = str(project03.used_numbers_single_Z2_30)
            Dict_6[key_dict] = value_dict
        if key_dict == 'ZK_42':
            value_dict = str(project03.used_numbers_single_ZK_42)
            Dict_6[key_dict] = value_dict
        if key_dict == 'SJ2_24':
            value_dict = str(project03.used_numbers_double_SJ2_24)
            Dict_6[key_dict] = value_dict
        if key_dict == 'SJ4_24':
            value_dict = str(project03.used_numbers_double_SJ4_24)
            Dict_6[key_dict] = value_dict
        if key_dict == 'SZ2_30':
            value_dict = str(project03.used_numbers_double_SZ2_30)
            Dict_6[key_dict] = value_dict
        if key_dict == 'SZK_42':
            value_dict = str(project03.used_numbers_double_SZK_42)
            Dict_6[key_dict] = value_dict
        if key_dict == '角钢':
            value_dict = str(project03.used_numbers_angle_steel)
            Dict_6[key_dict] = value_dict

    Dict_6['铁塔合计'] = str(project03.sum_used_numbers)

    print("---------铁塔生成完毕--------")

    tower_base_list = ['ZJC1', 'ZJC2', 'JJC1', 'JJC2', 'TW1', 'TW2', '基础垫层']
    c25_unit_list = [12, 16, 42, 80, 8.8, 10.2, 2.4]
    steel_unit_list = [300, 500, 750, 900, 600, 800, 0]
    foot_bolt_list = [100, 180, 280, 360, 100, 180, 0]

    args_chapter6_04_type = tower_base_list
    project04 = TowerBase(project_chapter6_type, *args)
    project04.sum_cal_tower_type(tower_type_list, tower_type_high_list,
                                 tower_weight_list, tower_height_list,
                                 tower_foot_distance_list)
    project04.sum_cal_tower_base(tower_base_list, c25_unit_list,
                                 steel_unit_list, foot_bolt_list)

    for i in range(0, len(args_chapter6_04_type)):
        key_dict = args_chapter6_04_type[i]
        if key_dict == 'ZJC1':
            Dict_6['ZJC1_num'] = str(project04.used_numbers_base_zjc1)
            Dict_6['c25_sum_zjc1'] = str(project04.c25_sum_zjc1)
            Dict_6['steel_sum_zjc1'] = str(project04.steel_sum_zjc1)

        if key_dict == 'ZJC2':
            Dict_6['ZJC2_num'] = str(project04.used_numbers_base_zjc2)
            Dict_6['c25_sum_zjc2'] = str(project04.c25_sum_zjc2)
            Dict_6['steel_sum_zjc2'] = str(project04.steel_sum_zjc2)

        if key_dict == 'JJC1':
            Dict_6['JJC1_num'] = str(project04.used_numbers_base_jjc1)
            Dict_6['c25_sum_jjc1'] = str(project04.c25_sum_jjc1)
            Dict_6['steel_sum_jjc1'] = str(project04.steel_sum_jjc1)

        if key_dict == 'JJC2':
            Dict_6['jjc2_num'] = str(project04.used_numbers_base_jjc2)
            Dict_6['c25_sum_jjc2'] = str(project04.c25_sum_jjc2)
            Dict_6['steel_sum_jjc2'] = str(project04.steel_sum_jjc2)

        if key_dict == 'TW1':
            Dict_6['tw1_num'] = str(project04.used_numbers_base_tw1)
            Dict_6['c25_sum_tw1'] = str(project04.c25_sum_tw1)
            Dict_6['steel_sum_tw1'] = str(project04.steel_sum_tw1)
        if key_dict == 'TW2':
            Dict_6['tw2_num'] = str(project04.used_numbers_base_tw2)
            Dict_6['c25_sum_tw2'] = str(project04.c25_sum_tw2)
            Dict_6['steel_sum_tw2'] = str(project04.steel_sum_tw2)

        if key_dict == '基础垫层':
            Dict_6['base_layer'] = str(project04.used_numbers_base_layer)
            Dict_6['c25_sum_layer'] = str(project04.c25_sum_layer)
            Dict_6['steel_sum_layer'] = str(project04.steel_sum_layer)

    Dict_6['基础数量合计'] = str(project04.used_numbers_base_sum)
    Dict_6['基础混凝土合计'] = str(project04.c25_sum)
    Dict_6['基础钢筋合计'] = str(project04.steel_sum)

    print("---------铁塔基础生成完毕--------")

    cable_project_list = ['高压电缆', '高压电缆', '电缆沟', '电缆终端', '电缆终端']
    cable_model_list = [
        'YJLV22_26_35_3_95_gaoya', 'YJV22_26_35_1_300_gaoya', '电缆沟长度',
        'YJLV22_26_35_3_95_dianlanzhongduan',
        'YJV22_26_35_1_300_dianlanzhongduan'
    ]

    args_chapter6_05_type = cable_model_list
    project05 = Cable(project_chapter6_type, *args)
    project05.sum_cal_cable(cable_project_list, cable_model_list)

    for i in range(0, len(args_chapter6_05_type)):
        key_dict = args_chapter6_05_type[i]
        if key_dict == 'YJLV22_26_35_3_95_gaoya':
            Dict_6['YJLV22_26_35_3_95_gaoya'] = str(
                project05.cable_model_YJLV22_26_35_3_95_gaoya)
        if key_dict == 'YJV22_26_35_1_300_gaoya':
            Dict_6['YJV22_26_35_1_300_gaoya'] = str(
                project05.cable_model_YJV22_26_35_1_300_gaoya)
        if key_dict == '电缆沟长度':
            Dict_6['电缆沟长度'] = str(project05.cable_model_cable_duct)
        if key_dict == 'YJLV22_26_35_3_95_dianlanzhongduan':
            Dict_6['YJLV22_26_35_3_95_dianlanzhongduan'] = str(
                project05.cable_model_YJLV22_26_35_3_95_dianlanzhongduan)
        if key_dict == 'YJV22_26_35_1_300_dianlanzhongduan':
            Dict_6['YJV22_26_35_1_300_dianlanzhongduan'] = str(
                project05.cable_model_YJV22_26_35_1_300_dianlanzhongduan)
    # print(Dict_6)
    path_images = r"C:\Users\Administrator\PycharmProjects\docx_project\files\results"
    tpl = DocxTemplate(
        r'C:\Users\Administrator\PycharmProjects\Odoo_addons_NB\autocrword\models\chapter_6\CR_chapter6_template.docx'
    )
    tpl.render(Dict_6)

    tpl.save(
        r'C:\Users\Administrator\PycharmProjects\Odoo_addons_NB\autocrword\models\chapter_6\result_chapter6_e.docx'
    )
    print("---------chapter 6 生成完毕--------")
Пример #32
0
# -*- coding: utf-8 -*-
from docxtpl import DocxTemplate
from docxtpl import InlineImage
from docx.shared import Inches
import jinja2

# Load the report template and prepare the cover photo as an inline image.
doc = DocxTemplate("1_Смоленский б-р, дом 17, стр. 5 ТЗК _ФИНАЛ_ДЛЯ ТЗК.docx")
cover_photo = InlineImage(doc, 'images/main1.jpg', width=Inches(4.73))

# Values substituted into the template placeholders.
context = {
    'customer': 'Фонд капитального ремонта многоквартирных домов города Москвы',
    'adress': 'г. Москва, ЦАО, Смоленский б-р, дом 17, стр. 5',
    'main_image': cover_photo,
}

# Render with autoescaping enabled and write the finished document.
environment = jinja2.Environment(autoescape=True)
doc.render(context, environment)
doc.save("result-final.docx")
Пример #33
0
def main():
    """Drive the auto-templation workflow end to end.

    Reads settings from ``autotemplation.ini``, downloads a template from
    Google Drive (converting a spreadsheet into a Doc table if needed),
    fills the template's variables interactively and/or from a worksheet,
    and uploads the rendered result back to Drive.
    """
    config = configparser.ConfigParser()
    config.read('autotemplation.ini')
    # TemplateFolderID may hold several comma-separated Drive folder ids.
    template_folder_ids = config['DEFAULT']['TemplateFolderID'].split(',')
    destination_folder_name = config['DEFAULT']['DestinationFolderName']
    credentials = get_credentials()
    http = credentials.authorize(httplib2.Http())
    drive_service = discovery.build('drive', 'v3', http=http)
    destination_folder_id = get_or_create_destination_folder_id(
        drive_service, destination_folder_name)
    template_file = get_template(drive_service, template_folder_ids)
    mime_type, is_sheet = get_mime_type(template_file['mimeType'])
    # Export the selected Drive file into an in-memory buffer.
    request = drive_service.files().export_media(
        fileId=template_file['id'],
        mimeType=mime_type)
    fh = io.BytesIO()
    downloader = MediaIoBaseDownload(fh, request)
    done = False
    while done is False:
        status, done = downloader.next_chunk()
        print("Download %d%%." % int(status.progress() * 100))
    if is_sheet:
        # Spreadsheets cannot be templated directly: rebuild the sheet as a
        # Word table cell by cell, then load that document as the template.
        print("Spreadsheet selected, converting to Doc. (Slow)")
        table_data = get_sheet_data(fh)
        row_count = len(table_data)
        col_count = len(table_data[0])
        document = Document()
        doc_table = document.add_table(rows=row_count,
                                       cols=col_count)
        for r, row in enumerate(table_data):
            row_cells = doc_table.rows[r].cells
            print("Converting row {}/{}...".format(r+1, row_count), end="\r")
            for i, cell in enumerate(row):
                if cell:
                    row_cells[i].text = cell
        print("Conversion complete. "
              "Warning: Processing large sheets will take some time.")
        temp_doc_file = io.BytesIO()
        document.save(temp_doc_file)
        doc = DocxTemplate(temp_doc_file)
    else:
        doc = DocxTemplate(fh)
    full_doc = doc.get_docx()

    template_vars = get_template_variables(full_doc, template_file['name'])
    # Variables containing '__' are resolved from a worksheet; only fetch
    # the worksheet when at least one such variable exists.
    if any('__' in x for x in template_vars):
        worksheet = get_worksheet(credentials)
        worksheet_headers = get_worksheet_headers(worksheet)
    context = dict()
    get_date_and_set_context(context)
    for var in template_vars:
        if var not in context:
            if '__' in var:
                context[var] = worksheet_lookup(
                    worksheet, worksheet_headers, var)
            else:
                # Plain variables are filled in interactively.
                context[var] = input("Enter a value for {}:  ".format(var))
    new_file_name = get_target_name(template_file['name'], context)
    doc.render(context)
    temp_file = tempfile.NamedTemporaryFile()
    doc.save(temp_file)
    if is_sheet:
        # For sheet templates, also emit a local CSV copy of the (single)
        # rendered table and upload the data as a spreadsheet.
        csv_name = '{}.csv'.format(new_file_name)
        doc_csv = DocxTemplate(temp_file)
        csv_data = get_table_data_for_csv(doc_csv)
        if csv_data:
            with open(csv_name, 'w') as output:
                writer = csv.writer(output, lineterminator='\n')
                writer.writerows(csv_data)
            print('{} created in local folder'.format(csv_name))
        else:
            print('Unable to create CSV. '
                  'Less than or more than 1 table found.')
        workbook = Workbook()
        sheet = workbook.get_active_sheet()
        for row in csv_data:
            sheet.append(row)
        workbook.save(temp_file)
        upload_mimetype = 'application/vnd.google-apps.spreadsheet'
    else:
        upload_mimetype = 'application/vnd.google-apps.document'

    file_metadata = {
        'name': new_file_name,
        'parents': [destination_folder_id],
        'mimeType': upload_mimetype
    }
    # NOTE(review): the upload reuses the *export* mime_type for the media
    # body; confirm this is intended rather than the rendered file's type.
    media = MediaFileUpload(temp_file.name,
                            mimetype=mime_type,
                            resumable=True)
    drive_service.files().create(body=file_metadata,
                                 media_body=media,
                                 fields='id').execute()
    print('{} placed in folder {}.'.format(new_file_name,
                                           destination_folder_name))
    temp_file.close()
Пример #34
0
def from_template(brand, model, fuel, price, template):
    """Render *template* with the car data and save a dated report.

    The output file is named ``YYYY-MM-DD_report.docx`` (today's date)
    in the current working directory.
    """
    document = DocxTemplate(template)
    document.render(get_context(brand, model, fuel, price))
    report_name = str(datetime.datetime.now().date()) + '_report.docx'
    document.save(report_name)
Пример #35
0
# Pull the first name off the form and echo it for debugging.
name = PersonForm.first_name
print(name)

'''
doc1 = docx.Document('example.docx')
doc2 = docx.Document('restyled.docx')

# получаем из первого документа стили всех абзацев
styles = []
for paragraph in doc1.paragraphs:
    styles.append(paragraph.style)

# применяем стили ко всем абзацам второго документа
for i in range(len(doc2.paragraphs)):
    doc2.paragraphs[i].style = styles[i]

doc2.save('restored.docx')
'''
# Collect the text of every paragraph in the source document.
text = []
for paragraph in var5.paragraphs:
    text.append(paragraph.text)
#print('\n'.join(text))
# BUG FIX: the separator was the literal four characters '/n/t'; use a real
# newline + tab escape sequence so paragraphs are actually separated.
myString = '\n\t'.join(text)
#context = { 'third' : myString}
context = { 'fifth' : myString}
#context = { 'seventh' : myString}
final_dock.render(context)
final_dock.save("final.docx")
print("true")
Пример #36
0
    sd = tpl.new_subdoc(detectinfopath)
    sd.add_paragraph()
    if 'bigPannel' in catecode:
        sd = bigpannel_detect_info(sd)
    elif 'ycWcd' in catecode:
        sd = ycwcd_detect_info(sd)
    elif 'ycB' in catecode:
        sd = ycb_detect_info(sd)
    elif 'ycBplus' in catecode:
        sd = ycbplus_detect_info(sd)
    elif 'thys' in catecode:
        pass
        return
    else:
        sd = detect_info(sd, detectgene)
    sd.add_page_break()
    return sd


if __name__ == '__main__':
    template_path = os.path.join(basepath, 'blank.docx')
    # Bind the result under a fresh name so the detectinfo() function
    # itself is not shadowed by its return value.
    info_by_item = detectinfo()
    for itemid in info_by_item:
        template = DocxTemplate(template_path)
        context = {'context': summary(template, info_by_item[itemid])}
        output_path = os.path.join(basepath, 'detectinfo', itemid + '.docx')
        template.render(context, autoescape=True)
        template.save(output_path)
    print(f'检测信息更新成功')
Пример #37
0
    'arch_plans_dem_horz': ws['EE2'].value,
    'arch_plans_pr_horz': ws['EF2'].value,
    'arch_plans_ex_vert': ws['EG2'].value,
    'arch_plans_dem_vert': ws['EH2'].value,
    'arch_plans_pr_vert': ws['EI2'].value,
    'eng_plans_horz': ws['EJ2'].value,
    'eng_plans_vert': ws['EK2'].value,
    'notice_1_req': ws['EN2'].value,
    'notice_2_req': ws['EP2'].value,
    'notice_6_req': ws['ER2'].value,
}

'''print(bo_property_add_horz)
print(bo_correspond_add_horz)
print(bo_correspond_add_vert)
print(ao_property_add_horz)
print(ao_property_add_vert)
print(ao_correspond_add_horz)
print(ao_correspond_add_vert)'''
# Echo the notice date for a quick visual check before rendering.
print(notice_date.strftime("%d %B %Y"))
#print(type(ao_letter_names))

# Render the selected template with the context built above and save it
# into the output folder under the same document name.
tpl = DocxTemplate('templates/'+selected_doc+'.docx')
tpl.render(context, jinja_env)
tpl.save(folder+'/'+selected_doc+'.docx')


# Possible follow-up conversions to PDF (left as manual shell commands):
#unoconv -f ('output/'+selected_doc+'.docx', 'output/'+selected_doc+'.pdf')
#unoconv -f pdf '01 - Letter of Appointment BO.docx'
#doc2pdf custom_LoA.docx
import json
from docxtpl import DocxTemplate

# Read the rendering context from the JSON data file.
with open('ds.json', 'r') as source:
    payload = json.load(source)

# Fill the report template and write the finished document.
template = DocxTemplate("report_tpl_5_x.docx")
template.render(payload)
template.save("generated_doc.docx")
Пример #39
0
    def apply_changes_impl(self):
        """Generate a report from the selected template.

        Persists the dialog's options into ``_settings``, recalculates race
        results/splits/scores, then renders either a DOCX template (docxtpl)
        or a text/HTML template and saves (and opens) the produced file.
        """
        obj = race()
        mw = GlobalAccess().get_main_window()
        # Parallel lists: data collections and the context keys they map to.
        map_items = [obj.persons, obj.results, obj.groups, obj.courses, obj.organizations]
        map_names = ['persons', 'results', 'groups', 'courses', 'organizations']
        selected_items = {
            'persons': [],
            'results': [],
            'groups': [],
            'courses': [],
            'organizations': [],
        }

        template_path = self.item_template.currentText()

        # Remember the dialog state for the next invocation.
        _settings['last_template'] = template_path
        _settings['open_in_browser'] = self.item_open_in_browser.isChecked()
        _settings['save_to_last_file'] = self.item_save_to_last_file.isChecked()
        _settings['selected'] = self.item_selected.isChecked()

        if _settings['selected']:
            # Only export the rows highlighted in the currently open tab.
            cur_items = map_items[mw.current_tab]

            for i in mw.get_selected_rows():
                selected_items[map_names[mw.current_tab]].append(cur_items[i].to_dict())

        # Refresh results, splits and scores before exporting.
        ResultCalculation(obj).process_results()
        RaceSplits(obj).generate()
        ScoreCalculation(obj).calculate_scores()

        races_dict = [r.to_dict() for r in races()]

        if template_path.endswith('.docx'):
            # DOCX template processing
            full_path = config.template_dir() + template_path
            doc = DocxTemplate(full_path)
            context = {}
            context['race'] = races_dict[get_current_race_index()]
            context['name'] = config.NAME
            context['version'] = str(config.VERSION)
            doc.render(context)

            if _settings['save_to_last_file']:
                file_name = _settings['last_file']
            else:
                file_name = get_save_file_name(_('Save As MS Word file'), _("MS Word file (*.docx)"),
                                               '{}_official'.format(obj.data.get_start_datetime().strftime("%Y%m%d")))
            if file_name:
                doc.save(file_name)
                os.startfile(file_name)

        else:
            # Text/HTML template: rendered by the generic template engine.
            template = get_text_from_file(
                template_path,
                race=races_dict[get_current_race_index()],
                races=races_dict,
                rent_cards=list(RentCards().get()),
                current_race=get_current_race_index(),
                selected=selected_items
            )

            if _settings['save_to_last_file']:
                file_name = _settings['last_file']
            else:
                file_name = get_save_file_name(_('Save As HTML file'), _("HTML file (*.html)"),
                                               '{}_report'.format(obj.data.get_start_datetime().strftime("%Y%m%d")))
            if len(file_name):
                _settings['last_file'] = file_name
                with codecs.open(file_name, 'w', 'utf-8') as file:
                    file.write(template)
                    file.close()

                # Open file in your browser
                if _settings['open_in_browser']:
                    webbrowser.open('file://' + file_name, new=2)
Пример #40
0
    def gerenerate_animal_cart_docx(self):
        """Render the animal card DOCX from this record and its relations.

        Flattens the shelter, shelter-stay, capture, animal and (optional)
        owner records into prefixed template variables, appends the drug /
        vaccine / inspection tables, normalises empty and date values, and
        returns the rendered ``DocxTemplate`` (not yet saved).
        """
        doc = DocxTemplate('docx_templates/animal_card_template.docx')

        params = {
            'today': datetime.now().strftime('«%d» %B %Y год'),
            'is_dog': '✓' if self.animal.kind.name == 'Собака' else '',
            'is_cat': '✓' if self.animal.kind.name == 'Кошка' else '',
            'is_socialization':
            'Да' if self.animal.is_socialization else 'Нет',
            'staff_name': self.shelter_staff.name,
        }

        # Prefix each related model's fields so the template variable names
        # stay unambiguous (e.g. shelter__name vs animal__name).
        for key, value in model_to_dict(self.shelter).items():
            params['shelter__' + key] = str(value)

        for key, value in model_to_dict(self).items():
            params['animalinshelter__' + key] = str(value)

        for key, value in model_to_dict(self.animalcapture).items():
            params['animalcapture__' + key] = str(value)

        for key, value in self.animal.to_dict().items():
            params['animal__' + key] = str(value)

        # Owners are optional; only add their fields when present.
        if not self.animal.owner_entity is None:
            for key, value in model_to_dict(self.animal.owner_entity).items():
                params['owner_entity__' + key] = str(value)

        if not self.animal.owner_individual is None:
            for key, value in model_to_dict(
                    self.animal.owner_individual).items():
                params['owner_individual__' + key] = str(value)

        # Row data for the drugs / vaccines / inspections tables (1-based labels).
        params['drugs_list'] = [{
            'label': i + 1,
            'date': drug.date,
            'name': drug.drug_name,
            'dose': drug.dose
        } for i, drug in enumerate(self.animal.animaldrug_set.all())]

        params['vacines_list'] = [{
            'label': i + 1,
            'date': vactine.date,
            'name': vactine.vacine_name,
            'series': vactine.series
        } for i, vactine in enumerate(self.animal.animalvacine_set.all())]

        params['inspection_list'] = [{
            'label': i + 1,
            'date': inspection.date,
            'weight': inspection.weight,
            'anamnes': inspection.anamnes
        } for i, inspection in enumerate(
            self.animal.animalinspection_set.all())]

        # Normalise string values: blank out 'None'/'nan'; pretty-print known
        # dates and use a fill-in-by-hand placeholder for missing dates.
        # Only values of existing keys are replaced, so mutating the dict
        # while iterating .items() is safe.
        for key, value in params.items():
            if value == 'None' or value == 'nan':
                params[key] = ''

            if key.endswith('date') and value != 'None' and value != 'nan':
                value = datetime.strptime(value, '%Y-%m-%d')
                params[key] = value.strftime('«%d» %B %Y года')
            elif key.endswith('date'):
                params[key] = '«__» ______ 20__ года'

        # Presumably a float-formatted string like '12.0'; keep only the
        # integer part for display — TODO confirm against the model field.
        params['animalinshelter__aviary_number'] = params[
            'animalinshelter__aviary_number'].split('.')[0]
        doc.render(params)

        return doc
Пример #41
0
# sheet.cell(2,1).value = 'Bike Name'
# wb.save('testing.xlsx')

# Generate one report document per spreadsheet row (rows 2..maxrow).
for row_idx in range(2, maxrow + 1):
    document = DocxTemplate('template.docx')
    bike_name = sheet.cell(row_idx, 1).value
    print(bike_name)
    purchased = sheet.cell(row_idx, 9).value.strftime('%d/%b/%Y')

    # Map template placeholders to the row's cell values.
    context = {
        'BNAME': bike_name,
        'BPRICE': sheet.cell(row_idx, 2).value,
        'HELMET': sheet.cell(row_idx, 3).value,
        'PURCHASE_EXPENSE': sheet.cell(row_idx, 4).value,
        'SERVICE_EXPENSE': sheet.cell(row_idx, 5).value,
        'FUEL_EXPENSE': sheet.cell(row_idx, 6).value,
        'TOTAL_EXPENSE': sheet.cell(row_idx, 7).value,
        'MILEAGE': sheet.cell(row_idx, 8).value,
        'PURCHASED_DATE': purchased
    }
    document.render(context)
    document.save('output/' + bike_name + '.docx')
Пример #42
0
def generatePdf(list, opt, case):
    """Build a tournament bracket document and save it as DOCX.

    Parameters
    ----------
    list : list
        Competitor entries; each item looks like ``(number, (name, ...))``.
    opt : list
        Category options; interpretation depends on *case*.
    case : int
        0 = individual, 1 = team, 2 = fukugo — selects how *opt* is read
        and how the output file is named.
    """

    outputName = ""

    if case == 0:
        sex, category, age, weight, experiance = [ opt[0], opt[1], [opt[2], opt[3]], [opt[4], opt[5]], opt[6] ]

        if experiance == 'Doświadczony':
            outputName += 'ind_' + sex + '_' + category + '_Wiek_' + age[0] + '_' + age[1] + '_Waga_' + weight[0] + '_' + weight[1] + '_doswiadczony'
        else:
            outputName += 'ind_' + sex + '_' + category + '_Wiek_' + age[0] + '_' + age[1] + '_Waga_' + weight[0] + '_' + weight[1] + '_niedoswiadczony'
    elif case == 1:
        sex, experiance, category = [ opt[0], opt[1], opt[2]]

        if experiance == 'Doświadczony':
            outputName += 'dru_' + sex + '_' + category  + '_doswiadczony'
        else:
            outputName += 'dru_' + sex + '_' + category  + '_niedoswiadczony'

    elif case == 2:
        sex, weight = [ opt[0], [opt[1], opt[2]] ]

        outputName = 'fukugo_' + sex + '_Waga_' + weight[0] + '_' + weight[1]

    outputName += '.docx'

    # Seeding orders for each bracket size.
    graph2 = [1,2]
    graph4 = [1,2,3,4]
    graph8 = [1,2,5,6,3,4,7,8]
    graph16 = [1,2,9,10,5,6,13,14,3,4,11,12,7,8,15,16]
    graph32 = [1,2,17,18,5,6,21,22,9,10,25,26,13,14,29,30,3,4,19,20,7,8,23,24,11,12,27,28,15,16,31,32]
    # BUG FIX: the 64-slot table contained '15,17' (16 missing, 17 listed
    # twice); restored to '15,16' so every slot 1..64 appears exactly once,
    # matching the pattern of the smaller tables.
    graph64 = [1,2,33,34,17,18,49,50,9,10,41,42,25,26,57,58,5,6,37,38,21,22,53,54,13,14,45,46,29,30,61,62,3,4,35,36,19,20,51,52,11,12,43,44,27,28,59,60,7,8,39,40,23,24,55,56,15,16,47,48,31,32,63,64]

    listLen = len(list)*2

    # Pick the smallest bracket size that fits all entries.
    if listLen > 32:
        iterElem = 64
        graphList = graph64
        inputFile = "template_64.docx"
    elif listLen > 16:
        iterElem = 32
        graphList = graph32
        inputFile = "template_32.docx"
    elif listLen > 8:
        iterElem = 16
        graphList = graph16
        inputFile = "template_16.docx"
    elif listLen > 4:
        iterElem = 8
        graphList = graph8
        inputFile = "template_8.docx"
    elif listLen > 2:
        iterElem = 4
        graphList = graph4
        inputFile = "template_4.docx"
    else:
        iterElem = 2
        graphList = graph2
        inputFile = "template_2.docx"

    context = {
        'zawody': "",
        'data' : "",
        'konkurencja' : ""}
    slot = 1
    # BUG FIX: integer division (//) throughout — under Python 3 the
    # original '/' produced floats, which break range() and list indexing.
    for i in range(iterElem // 2):
        # BUG FIX: rewrote the Python 2 'print (...) + str(...)' statement
        # (a TypeError on Python 3) as a single print call.
        print("iterator" + str(i) + " list len " + str(listLen) + " iterElem " + str(iterElem // 2))
        # Seed of the second competitor of pair i (0-based); if odd, halve
        # it to map back to an index into the entry list.
        valGraphList = graphList[(2*i)+1]-1
        if valGraphList != 0:
            print("w ifie")
            valGraphList = valGraphList // 2
        print(str(valGraphList))
        if valGraphList*2 <= listLen-1:
            print("w ifie2")
            # NOTE(review): both slots of the pair receive the same entry;
            # the second assignment may have been meant for the opponent —
            # preserved as-is, confirm against the template layout.
            context['slot_'+str(slot)] = '%s: %s' % (list[valGraphList][0]+1,list[valGraphList][1][0])
            slot = slot + 1
            context['slot_'+str(slot)] = '%s: %s' % (list[valGraphList][0]+1,list[valGraphList][1][0])
            slot = slot + 1
            print("dodane do slownika")

    doc = DocxTemplate("templates/"+inputFile)
    doc.render(context)
    print("saving")
    doc.save(outputName)
Пример #43
0
    def create_source_docx_partner(self, cr, uid, ids, report, records, init_pay, context=None):
        """Render a partner statement DOCX (optionally converted to PDF).

        Builds a summary dict for the partner over the report period, fills
        the template referenced by ``report.template_file`` and returns
        ``(report_stream, report.output_type)`` where *report_stream* holds
        the rendered file's raw bytes.
        """
        # 2016-11-2: image support was added.
        # 1. Import the dependency (Python 3 syntax):
        # from . import report_helper
        # 2. A 'tpl' entry pointing at the template object must be passed in
        #    the render context (report_helper needs it for images).
        tempname = tempfile.mkdtemp()
        temp_out_file = self.generate_temp_file(tempname)
        doc = DocxTemplate(misc.file_open(report.template_file).name)

        env = api.Environment(cr, uid, context)
        partner = env.get('partner').search([('id', '=', context.get('partner_id'))])
        simple_dict = {'partner_name': partner.name,
                       'from_date': context.get('from_date'),
                       'to_date': context.get('to_date'),
                       'report_line': [],
                       'init_pay': {},
                       'final_pay': {}}
        # With no movement lines, render a statement carrying only the
        # opening balance and return early (PDF conversion is skipped here).
        if not records:
            if init_pay:
                simple_dict['init_pay'] = init_pay
                simple_dict['final_pay'] = init_pay
            doc.render({'obj': simple_dict, 'tpl': doc}, report_helper.get_env())
            doc.save(temp_out_file)

            report_stream = ''
            with open(temp_out_file, 'rb') as input_stream:
                report_stream = input_stream.read()

            os.remove(temp_out_file)
            return report_stream, report.output_type

        data = DataModelProxy(records)
        for p_value in data:
            simple_dict['report_line'].append({
                'date': p_value.date,
                'name': p_value.name,
                'note': p_value.note,
                'amount': p_value.amount,
                'pay_amount': p_value.pay_amount,
                'discount_money': p_value.discount_money,
                'balance_amount': p_value.balance_amount
            })
        if data:
            # Opening balance is reconstructed from the first line; the
            # closing balance is the last line's running balance.
            simple_dict['init_pay'] = data[0].balance_amount - data[0].amount + data[0].pay_amount - data[
                0].discount_money
            simple_dict['final_pay'] = data[-1].balance_amount

        doc.render({'obj': simple_dict, 'tpl': doc}, report_helper.get_env())
        doc.save(temp_out_file)

        if report.output_type == 'pdf':
            temp_file = self.render_to_pdf(temp_out_file)
        else:
            temp_file = temp_out_file

        report_stream = ''
        with open(temp_file, 'rb') as input_stream:
            report_stream = input_stream.read()

        os.remove(temp_file)
        return report_stream, report.output_type
Пример #44
0
class MianWindow(noname.MyFrame1):
    """Main window: batch-fills a DOCX template from an Excel data file.

    Each data row of the selected spreadsheet becomes one rendered
    document, saved into the chosen destination folder and named after
    the row's first column value.
    """

    def __init__(self, master):
        super(MianWindow, self).__init__(master)
        # Paths chosen by the user (template file, data file, output dir).
        self.temp_dath = None
        self.data_dath = None
        self.dir_dath = None
        self.desktop = os.path.join(os.path.expanduser('~'), 'Desktop')
        self.tempPicker.SetInitialDirectory(self.desktop)
        self.dataPicker.SetInitialDirectory(self.desktop)
        self.dirPicker.SetInitialDirectory(self.desktop)
        self.textShow.WriteText('Welcome to the batch Template software.'+'\n\n')
        self.date1 = time.strftime('%Y-%m-%d', time.localtime())
        self.time1 = time.strftime('%H-%M-%S', time.localtime())
        self.expired = self.loadDate()
        self.textShow.WriteText('Data: '+self.date1+'\n')
        self.textShow.WriteText('Time: '+self.time1+'\n\n')
        self.textShow.WriteText('The software will expire on '+self.expired+'\n\n')
        self.textShow.WriteText('')

    def loadDate(self):
        # Hard-coded trial expiry date; compared lexically (ISO format).
        return '2019-04-30'

    def openTempFile(self, event):
        """Record the template file chosen in the picker."""
        if self.date1 > self.expired:
            self.textShow.WriteText('The software has expired.\n')
            return
        self.temp_dath = self.tempPicker.GetPath()
        print('Trmplate File:', self.temp_dath)
        self.textShow.WriteText('Trmplate File:'+self.temp_dath+"\n")
        event.Skip()

    def openDataFile(self, event):
        """Load the Excel data file.

        Row 0 holds the template variable names; each following row is
        turned into one render-context dict in ``self.temp_list``.
        """
        if self.date1 > self.expired:
            self.textShow.WriteText('The software has expired.\n')
            return
        self.data_dath = self.dataPicker.GetPath()
        print('Data File:', self.data_dath)
        self.textShow.WriteText('Data File:'+self.data_dath+"\n")
        wookbook = xlrd.open_workbook(self.data_dath)
        sheet01 = wookbook.sheet_by_index(0)
        self.temp_list = []
        if sheet01.nrows > 1:
            # Renamed the comprehension variable so it no longer shadows
            # the row index (the original reused 'i' for both).
            headers = [cell.value for cell in sheet01.row(0)]
            for row_idx in range(1, sheet01.nrows):
                values = [cell.value for cell in sheet01.row(row_idx)]
                self.temp_list.append({k: v for k, v in zip(headers, values)})
        event.Skip()

    def openDataDir(self, event):
        """Record the destination folder chosen in the picker."""
        if self.date1 > self.expired:
            self.textShow.WriteText('The software has expired.\n')
            return

        self.dir_dath = self.dirPicker.GetPath()
        self.textShow.WriteText('Saved Dir:'+self.dir_dath+"\n")
        print('Saved Dir:', self.dir_dath)
        event.Skip()

    def startButtonFunc( self, event ):
        """Render one document per data row, updating the progress gauge."""
        if self.date1 > self.expired:
            self.textShow.WriteText('The software has expired.\n')
            return
        if self.temp_dath == None or len(self.temp_dath) == 0:
            self.textShow.WriteText('Please choice a template file.\n')
            return
        if self.data_dath == None or len(self.data_dath) == 0:
            self.textShow.WriteText('Please choice a data file.\n')
            return
        if self.dir_dath == None or len(self.dir_dath) == 0:
            self.textShow.WriteText('Please choice a destination folder.\n')
            return

        print(f'Batch Deal Start: Total {len(self.temp_list)}')
        listlen = len(self.temp_list)
        # BUG FIX: the original looped 'for i in range(100)', silently
        # dropping every row beyond the first 100; iterate the data itself.
        for i, dtmp in enumerate(self.temp_list):
            v = (i+1)*100/listlen
            v = v if 0 <= v <= 100 else 100
            self.gauge.SetValue(v)
            self.doc = DocxTemplate(self.temp_dath)
            # The output file is named after the row's first column value.
            tname = str([val for val in dtmp.values()][0]) +'.docx'
            self.doc.render(dtmp)
            name = os.path.join(self.dir_dath, tname)
            print('Create File:', name)
            self.textShow.WriteText('Create File:'+name+"\n")
            self.doc.save(name)
        print('Batch Deal End')
Пример #45
0
# Finish the paragraph with an italicised library name.
p.add_run('python-docx').italic = True
p.add_run(' library')

# Populate the subdocument: heading, quote, picture and a demo table.
sd.add_heading('Heading, level 1', level=1)
sd.add_paragraph('This is an Intense quote', style='IntenseQuote')

sd.add_paragraph('A picture :')
sd.add_picture('python_logo.png', width=Inches(1.25))

sd.add_paragraph('A Table :')
table = sd.add_table(rows=1, cols=3)
for column, title in enumerate(('Qty', 'Id', 'Desc')):
    table.rows[0].cells[column].text = title
rows = ((1, 101, 'Spam'),
        (2, 42, 'Eggs'),
        (3, 631, 'Spam,spam, eggs, and ham'))
for qty, ident, desc in rows:
    cells = table.add_row().cells
    cells[0].text = str(qty)
    cells[1].text = str(ident)
    cells[2].text = desc

# Embed the subdocument into the main template and save the result.
context = {
    'mysubdoc' : sd,
}

tpl.render(context)
tpl.save('test_files/subdoc.docx')
Пример #46
0
 def renderDocx(self) -> None:
     """Render the template with this object's attributes and save it."""
     document = DocxTemplate(TEMPLATE_FILE_NAME)
     document.render(self.__dict__)
     document.save(self.docFileName)
Пример #47
0
def render_document(template, filename, data):
    """Fill *template* with *data* and write the result to *filename*."""
    print(data)
    document = DocxTemplate(template)
    # The data is passed through as-is; sanitise here if ever needed.
    document.render(data)
    document.save(filename)
Пример #48
0
# -*- coding: utf-8 -*-
'''
Created : 2017-09-09

@author: Eric Lapouyade
'''

from docxtpl import DocxTemplate

# Step 1: render the "dynamic embedded docx" that will be injected later.
inner_template = DocxTemplate('test_files/embedded_embedded_docx_tpl.docx')
inner_template.render({
    'name' : 'John Doe',
})
inner_template.save('test_files/embedded_embedded_docx.docx')


# Step 2: render the main document, swapping in both embedded files.
main_template = DocxTemplate('test_files/embedded_main_tpl.docx')

main_template.replace_embedded('test_files/embedded_dummy.docx','test_files/embedded_static_docx.docx')
main_template.replace_embedded('test_files/embedded_dummy2.docx','test_files/embedded_embedded_docx.docx')
main_template.render({
    'name' : 'John Doe',
})
main_template.save('test_files/embedded.docx')
Пример #49
0
    def export_resume(self,ids):
        """Export the resumes of the given employees as a zip of .docx files.

        ``ids`` is a JSON-encoded list of ``hr.employee`` record ids.  For
        each employee the addon's ``resume_template.docx`` is rendered and
        the documents are returned in a single zip HTTP response.

        NOTE(review): Python 2 code (print statement, StringIO) — keep as-is
        unless the whole module is ported.
        """
        # in-memory stream backing the zip archive
        zip_stream = StringIO()
        resume_zip = zipfile.ZipFile(zip_stream,'w')
        # decode the JSON id list passed by the client
        id_list = json.loads(ids)
        # fetch the employees whose resumes will be exported
        Model = request.session.model('hr.employee')
        employees = Model.search_read([('id','in',id_list)])
        job=''

        for i,employee in enumerate(employees):

            # locate and load the resume template shipped with the addon
            path = os.path.abspath(os.path.dirname(sys.argv[0]))
            tpl = DocxTemplate(path.replace('\\','/')+'/myaddons/nantian_erp/resume_template.docx')
            # stream the rendered resume is written to
            fp = StringIO()
            experiences_list = []
            certifications_dict=[]
            if employee['job_id']:
                job = employee['job_id'][1]
            if employee['work_experience_ids']:
                Model = request.session.model('nantian_erp.work_experience')
                experiences = Model.search_read([('id','in',employee['work_experience_ids'])])
                for exper in experiences:
                    # '&' must be escaped for the XML inside the docx
                    exper_dict = {'date':exper['date'] or '','name':exper['name'].replace('&','&amp;') or '','job':exper['job'].replace('&','&amp;') or '','description':exper['description'].replace('&','&amp;') or ''}
                    experiences_list.append(exper_dict)

            if employee['certificate_ids']:
                count = 0
                Model = request.session.model('nantian_erp.certificate')
                certificates = Model.search_read([('id','in',employee['certificate_ids'])])
                for cer in certificates:
                    count = count + 1
                    image = ''
                    name = ''
                    if cer['image']:
                        # decode the base64 field into an in-memory image file
                        f = StringIO(base64.b64decode(str(cer['image'])))
                        print "查看图片名称%d"%count
                        try:
                            # best-effort: skip images docxtpl cannot load
                            image = InlineImage(tpl,f)
                        except Exception:
                            pass
                        #image = InlineImage(tpl,f,height=Mm(30))
                        f.close()
                    if cer['name']:
                        name = cer['name'].replace('&','&amp;')
                    certificate = {'name':name or '','image': image or '',}

                    certifications_dict.append(certificate)
            gender = ''
            if employee['gender'] == 'male':
                gender = u'男'
            elif employee['gender'] == 'female':
                gender = u'女'

            # context consumed by the resume template
            resume_dict = {'name':employee['name'] or '',
                           'gender':gender or '',
                           'birthday':employee['birthday']or '',
                           'education':employee['education']or '',
                           'graduction':employee['graduation']or '',
                           'major':employee['major']or '',
                           'job':job or '',
                           'work_time':employee['work_time']or '',
                           'specialty':employee['specialty']or '',
                           'work_experiences':experiences_list or [],
                           'certifications':certifications_dict or [],
                           }
            # encode_json = json.dumps(resume_dict)
            # rep_resume_dict = encode_json.replace('&','&amp;')
            # resume_dict = json.loads(rep_resume_dict)
            # print resume_dict
            tpl.render(resume_dict)
            tpl.save(fp)
            fp.seek(0)
            resume_zip.writestr(employee['name']+u'简历'+'.docx',fp.getvalue())
            fp.close()
        resume_zip.close()
        zip_stream.seek(0)
        # return the archive as an HTTP download response
        return request.make_response(zip_stream.getvalue() ,
            headers=[('Content-Disposition',content_disposition(u'简历'+'.zip')),
                     ('Content-Type', 'application/zip')],
            )
Пример #50
0
def generate_document(template, context, filename):
    """Fill the Word *template* with *context* and write it to *filename*."""
    document = DocxTemplate(template)
    document.render(context=context)
    document.save(filename)
# collect high-voltage nodes info for the reporting window
hvNodesInfoList: List[IHvNodesInfo] = hvNodesFetcher.fetchHvNodesInfo(
    startDate, endDate)
reportContext['hvNodes'] = hvNodesInfoList

# get LV Nodes Info
lvNodesFetcher = LvNodesInfoFetcher(appDbConStr)
lvNodesInfoList: List[ILvNodesInfo] = lvNodesFetcher.fetchLvNodesInfo(
    startDate, endDate)
reportContext['lvNodes'] = lvNodesInfoList

# generate report word file
tmplPath = "assets/weekly_report_template.docx"
doc = DocxTemplate(tmplPath)

# # signature Image
# signatureImgPath = 'assets/signature.png'
# signImg = InlineImage(doc, signatureImgPath)
# reportContext['signature'] = signImg

# render the template and name the dump file after the report window
doc.render(reportContext)
dumpFileName = 'Weekly_no_{0}_{1}_to_{2}.docx'.format(
    weekNum, dt.datetime.strftime(startDate, '%d-%m-%Y'),
    dt.datetime.strftime(endDate, '%d-%m-%Y'))
dumpFileFullPath = os.path.join(appConfig['dumpFolder'], dumpFileName)
doc.save(dumpFileFullPath)
print('Weekly report word file generation done...')

# convert report to pdf (presumably docx2pdf's `convert` — imported above; confirm)
convert(dumpFileFullPath, dumpFileFullPath.replace('.docx', '.pdf'))
print('Weekly report pdf generation done...')
Пример #52
0
@author: Eric Lapouyade
'''

from docxtpl import DocxTemplate

# Demo of nested {% for %} loops in a docxtpl template: each dish lists
# its ingredients and each author lists their books.
template = DocxTemplate('templates/nested_for_tpl.docx')

dishes = [
    {'name': 'Pizza', 'ingredients': ['bread', 'tomato', 'ham', 'cheese']},
    {'name': 'Hamburger', 'ingredients': ['bread', 'chopped steak', 'cheese', 'sauce']},
    {'name': 'Apple pie', 'ingredients': ['flour', 'apples', 'suggar', 'quince jelly']},
]

authors = [
    {'name': 'Saint-Exupery', 'books': [
        {'title': 'Le petit prince'},
        {'title': "L'aviateur"},
        {'title': 'Vol de nuit'},
    ]},
    {'name': 'Barjavel', 'books': [
        {'title': 'Ravage'},
        {'title': 'La nuit des temps'},
        {'title': 'Le grand secret'},
    ]},
]

template.render({'dishes': dishes, 'authors': authors})
template.save('output/nested_for.docx')