Example #1
def convertir_pdf(request, pk):
    # Look up the book by its primary key.
    try:
        libro = Libro.objects.get(id=pk)
    except Libro.DoesNotExist:  # If it does not exist, raise "page not found".
        raise Http404()
    # Create an HttpResponse object with the appropriate PDF headers.
    response = HttpResponse(content_type='application/pdf')
    # Make sure the browser opens the file directly.
    response['Content-Disposition'] = 'inline; filename="archivo.pdf"'
    buffer = BytesIO()
    # Create the PDF object, using the BytesIO object as its "file".
    p = canvas.Canvas(buffer)
    # Draw things on the PDF. Here's where the PDF generation happens.
    # See the ReportLab documentation for the full list of functionality.
    p.roundRect(0, 750, 694, 120, 20, stroke=0, fill=1)
    #p.setFont('Times-Bold', 32)
    p.setFillColorRGB(1, 1, 1)
    p.drawString(100, 800, str(libro.titulo))  # Draw the book's title (the cover image call is commented out below).
    #p.drawImage(str(libro.portada.url), 100, 100, width=400, height=600)
    # Finish the page and save the PDF object.
    p.showPage()
    p.save()
    # Get the value of the BytesIO buffer and write it to the response.
    pdf = buffer.getvalue()
    # Close the buffer.
    buffer.close()
    response.write(pdf)
    return response 
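To serve this view it still needs a URL route; a minimal sketch, assuming the view lives in a hypothetical libros/views.py:

# urls.py -- hypothetical project layout; adjust names to your app.
from django.urls import path
from libros import views

urlpatterns = [
    # GET /libros/5/pdf/ streams the generated PDF for book 5.
    path('libros/<int:pk>/pdf/', views.convertir_pdf, name='convertir_pdf'),
]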
Example #2
def test_empty_string():
    # make sure reading empty string does not raise error
    estring_fname = pjoin(test_data_path, 'single_empty_string.mat')
    fp = open(estring_fname, 'rb')
    rdr = MatFile5Reader(fp)
    d = rdr.get_variables()
    fp.close()
    assert_array_equal(d['a'], np.array([], dtype='U1'))
    # empty string round trip.  Matlab cannot distinguish
    # between a string array that is empty, and a string array
    # containing a single empty string, because it stores strings as
    # arrays of char.  There is no way of having an array of char that
    # is not empty, but contains an empty string.
    stream = BytesIO()
    savemat(stream, {'a': np.array([''])})
    rdr = MatFile5Reader(stream)
    d = rdr.get_variables()
    assert_array_equal(d['a'], np.array([], dtype='U1'))
    stream.truncate(0)
    stream.seek(0)
    savemat(stream, {'a': np.array([], dtype='U1')})
    rdr = MatFile5Reader(stream)
    d = rdr.get_variables()
    assert_array_equal(d['a'], np.array([], dtype='U1'))
    stream.close()
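The same round trip is simpler through scipy.io's public savemat/loadmat API, which is what most callers would use; a minimal sketch:

from io import BytesIO

import numpy as np
from scipy.io import loadmat, savemat

stream = BytesIO()
savemat(stream, {'a': np.array([''])})
stream.seek(0)  # rewind before handing the stream back to the reader
d = loadmat(stream)
print(d['a'].size)  # 0 -- the single empty string comes back as an empty char array
stream.close()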
Example #3
def get_compressed_file_data(file_path, compresslevel=5):
    compressed_buffer = BytesIO()

    gzip_file = GzipFile(mode='wb',
                         compresslevel=compresslevel,
                         fileobj=compressed_buffer)

    try:
        fileobj = open(file_path, 'rb')
        while True:
            x = fileobj.read(65536)
            if not x:
                break
            gzip_file.write(x)
            x = None
        fileobj.close()
    except IOError as e:
        LOG.error(str(e))
        return None

    gzip_file.close()

    compressed_data = compressed_buffer.getvalue()
    compressed_buffer.close()

    return compressed_data
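The reverse direction follows the same BytesIO pattern; a minimal sketch that inflates the bytes returned by get_compressed_file_data back into the original data:

from gzip import GzipFile
from io import BytesIO

def get_decompressed_data(compressed_data):
    # Wrap the compressed bytes so GzipFile can treat them as a readable file.
    buffer = BytesIO(compressed_data)
    with GzipFile(mode='rb', fileobj=buffer) as gzip_file:
        return gzip_file.read()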
Example #4
def generar_pdf_Factura(request):
    response = HttpResponse(content_type='application/pdf')
    pdf_name = "facturas.pdf" 
    buff = BytesIO()
    doc = SimpleDocTemplate(buff,
        pagesize=letter,
        rightMargin=40,
        leftMargin=40,
        topMargin=60,
        bottomMargin=18,
        )
    facturas = []
    styles = getSampleStyleSheet()
    header = Paragraph("Listado de Facturas", styles['Heading1'])
    facturas.append(header)
    headings = ('Numero de Factura', 'Cliente', 'Fecha', 'Total')
    allfacturas = [(p.fac_num, p.cli_id, p.fac_fec, p.fac_tot) for p in Factura.objects.order_by('fac_num')]

    t = Table([headings] + allfacturas)
    t.setStyle(TableStyle(
        [
        ('GRID', (0, 0), (-1, -1), 1, colors.dodgerblue),
        ('LINEBELOW', (0, 0), (-1, 0), 2, colors.darkblue),
        ('BACKGROUND', (0, 0), (-1, 0), colors.dodgerblue)
        ]
        ))

    facturas.append(t)
    doc.build(facturas)
    response.write(buff.getvalue())
    buff.close()
    return response
Example #5
    def send_tryton_url(self, path):
        self.send_response(300)
        hostname = (config.get('jsonrpc', 'hostname')
            or unicode(socket.getfqdn(), 'utf8'))
        hostname = '.'.join(encodings.idna.ToASCII(part) for part in
            hostname.split('.'))
        values = {
            'hostname': hostname,
            'path': path,
            }
        content = BytesIO()

        def write(str_):
            content.write(str_.encode('utf-8'))
        write('<html>')
        write('<head>')
        write('<meta http-equiv="Refresh" '
            'content="0;url=tryton://%(hostname)s%(path)s"/>' % values)
        write('<title>Moved</title>')
        write('</head>')
        write('<body>')
        write('<h1>Moved</h1>')
        write('<p>This page has moved to '
            '<a href="tryton://%(hostname)s%(path)s">'
            'tryton://%(hostname)s%(path)s</a>.</p>' % values)
        write('</body>')
        write('</html>')
        length = content.tell()
        content.seek(0)
        self.send_header('Location', 'tryton://%(hostname)s%(path)s' % values)
        self.send_header('Content-type', 'text/html')
        self.send_header('Content-Length', str(length))
        self.end_headers()
        self.copyfile(content, self.wfile)
        content.close()
Example #6
def resguardoPdf(request, pk):

    resguardo = Resguardo.objects.get(id=pk)
    nombre = 'resguardo_' + str(resguardo.id)
    response = HttpResponse(content_type='application/pdf')
    response['Content-Disposition'] = 'attachment;filename=resguardo.pdf'
    buffer = BytesIO()
    c = canvas.Canvas(buffer, pagesize=letter)

    def header():

        c.drawImage('static/images/CFPPCH.png', 10, 670, 130, 130)
        c.setLineWidth(.3)
        c.setFont('Helvetica-Bold', 20)
        c.drawString(120, 750, 'CEFPP')
        # c.drawString(160, 740, ...)  # incomplete call in the original; the text argument is missing

        c.setFont('Helvetica-Bold', 15)
        c.drawString(480, 750, 'Inventario')

    c.setFillColorRGB(1, 0, 0)  # ReportLab expects color components in the 0-1 range
    c.setFont('Helvetica', 12)
    c.drawString(485, 735, resguardo.inventario)

    c.line(460, 747, 560, 747)

    header()

    c.showPage()
    c.save()
    pdf = buffer.getvalue()
    buffer.close()
    response.write(pdf)
    return response
Example #7
    def process(self, response:Response, responseCnt:ResponseContent, **keyargs):
        '''
        @see: HandlerProcessorProceed.process
        '''
        assert isinstance(response, Response), 'Invalid response %s' % response
        assert isinstance(responseCnt, ResponseContent), 'Invalid response content %s' % responseCnt

        if response.isSuccess is False: return  # Skip in case the response is in error
        if Response.encoder not in response: return  # Skip in case there is no encoder to render
        assert callable(response.renderFactory), 'Invalid response renderer factory %s' % response.renderFactory

        output = BytesIO()
        render = response.renderFactory(output)
        assert isinstance(render, IRender), 'Invalid render %s' % render

        resolve = Resolve(response.encoder).request(value=response.obj, render=render, **response.encoderData or {})

        if not self.allowChunked and ResponseContent.length not in responseCnt:
            while resolve.has(): resolve.do()
            content = output.getvalue()
            responseCnt.length = len(content)
            responseCnt.source = (content,)
            output.close()
        else:
            responseCnt.source = self.renderAsGenerator(resolve, output, self.bufferSize)
Example #8
def doImageRender(graphClass, graphOptions):
    pngData = BytesIO()
    img = graphClass(**graphOptions)
    img.output(pngData)
    imageData = pngData.getvalue()
    pngData.close()
    return imageData
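A usage sketch; LineGraph is a hypothetical stand-in for whatever graph class exposes the output(fileobj) interface assumed above:

class LineGraph:
    # Minimal stub with the interface doImageRender expects.
    def __init__(self, **options):
        self.options = options

    def output(self, fileobj):
        # A real implementation would render the graph as PNG bytes here.
        fileobj.write(b'\x89PNG\r\n\x1a\n')

png_bytes = doImageRender(LineGraph, {'width': 400, 'height': 300})
with open('graph.png', 'wb') as f:  # placeholder output path
    f.write(png_bytes)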
Example #9
    def _run_step(self, step_num, step_type, input_path, output_path,
                  working_dir, env, child_stdin=None):
        step = self._get_step(step_num)

        # if no mapper, just pass the data through (see #1141)
        if step_type == 'mapper' and not step.get('mapper'):
            copyfile(input_path, output_path)
            return

        # Passing local=False ensures the job uses proper names for file
        # options (see issue #851 on github)
        common_args = (['--step-num=%d' % step_num] +
                       self._mr_job_extra_args(local=False))

        if step_type == 'mapper':
            child_args = (
                ['--mapper'] + [input_path] + common_args)
        elif step_type == 'reducer':
            child_args = (
                ['--reducer'] + [input_path] + common_args)
        elif step_type == 'combiner':
            child_args = ['--combiner'] + common_args + ['-']

        has_combiner = (step_type == 'mapper' and 'combiner' in step)

        try:
            # Use custom stdout
            if has_combiner:
                child_stdout = BytesIO()
            else:
                child_stdout = open(output_path, 'wb')

            with save_current_environment():
                with save_cwd():
                    os.environ.update(env)
                    os.chdir(working_dir)

                    child_instance = self._mrjob_cls(args=child_args)
                    child_instance.sandbox(stdin=child_stdin,
                                           stdout=child_stdout)
                    child_instance.execute()

            if has_combiner:
                sorted_lines = sorted(child_stdout.getvalue().splitlines())
                combiner_stdin = BytesIO(b'\n'.join(sorted_lines))
            else:
                child_stdout.flush()
        finally:
            child_stdout.close()

        while len(self._counters) <= step_num:
            self._counters.append({})
        parse_mr_job_stderr(child_instance.stderr.getvalue(),
                            counters=self._counters[step_num])

        if has_combiner:
            self._run_step(step_num, 'combiner', None, output_path,
                           working_dir, env, child_stdin=combiner_stdin)

            combiner_stdin.close()
Example #10
 def get(self, url, accept = "*/*", charset = "UTF-8", referer = ""):
     curl = pycurl.Curl()
     print("get: %s", url)
     buf = BytesIO()
     try:
         curl.setopt(pycurl.COOKIEFILE, CookieFile)
         curl.setopt(pycurl.COOKIEJAR, CookieFile)
         curl.setopt(pycurl.FOLLOWLOCATION, 1)    # follow redirects
         curl.setopt(pycurl.MAXREDIRS, 5)    # maximum number of redirects
         curl.setopt(pycurl.TIMEOUT, 80)    # connection timeout
         # curl.setopt(pycurl.VERBOSE, 1)    # verbose
         curl.setopt(pycurl.WRITEFUNCTION, buf.write)
         curl.setopt(pycurl.URL, url)
         curl.setopt(pycurl.HTTPHEADER, ["Accept: %s" % accept, "Accept-Charset: %s" % charset, "Referer: %s" % referer])
         curl.perform()
         for line in curl.getinfo(pycurl.INFO_COOKIELIST):
             self._parseCookie(line)
         result = buf.getvalue()    # get the buffered data
         buf.close()
         curl.close()
         del buf
         del curl
         return self.bytesToStr(result)
     except Exception as e:
         print(e)
         del buf
         del curl
         return ""
Example #11
def convert_svg2png(infile, outfile, w, h):
    """
        Converts svg files to png using Cairosvg or Inkscape
        @file_path : String; the svg file absolute path
        @dest_path : String; the png file absolute path
    """
    if use_inkscape:
        p = Popen(["inkscape", "-z", "-f", infile, "-e", outfile,
                   "-w", str(w), "-h", str(h)],
                  stdout=PIPE, stderr=PIPE)
        output, err = p.communicate()
    else:
        handle = Rsvg.Handle()
        svg = handle.new_from_file(infile)
        dim = svg.get_dimensions()

        img = cairo.ImageSurface(cairo.FORMAT_ARGB32, w, h)
        ctx = cairo.Context(img)
        ctx.scale(w / dim.width, h / dim.height)
        svg.render_cairo(ctx)

        png_io = BytesIO()
        img.write_to_png(png_io)
        with open(outfile, 'wb') as fout:
            fout.write(png_io.getvalue())
        svg.close()
        png_io.close()
        img.finish()
Example #12
def generate_slip_pdf(ansattPeriode):
    """Lager PDF med lønnslippen til den annsatte for perioden"""
    buffer = BytesIO()
    # Create the PDF object, using the response object as its "file."
    c = canvas.Canvas(buffer, encrypt=ansattPeriode.ansatt.f_nr)
    
    c.setAuthor("Django_regnskap")
    c.setTitle("Lonn %s"%ansattPeriode.periode.navn)
    c.setFont("Helvetica", 12)
    
    # Draw things on the PDF. Here's where the PDF generation happens.
    # See the ReportLab documentation for the full list of functionality.
    
    if not ansattPeriode.periode.finalized:  # draft
        draw_kladd(c)

    # Draw the payslip for the employee
    draw_slip(c, ansattPeriode)

    # Close the PDF object cleanly, and we're done.
    c.showPage()
    c.save()
    pdf = buffer.getvalue()
    buffer.close()
    return pdf
Example #13
def generate_periode_pdf(periode):
    """Lager PDF med løalle bilaf for perioden"""
    buffer = BytesIO()
    # Create the PDF object, using the response object as its "file."
    c = canvas.Canvas(buffer)
    
    c.setAuthor("Django_regnskap")
    c.setTitle("Lonn %s"%periode.navn)
    c.setFont("Helvetica", 12)
    
    # Draw things on the PDF. Here's where the PDF generation happens.
    # See the ReportLab documentation for the full list of functionality.
    
    draw_journal(c, periode)
    if not periode.finalized:
        draw_kladd(c)
    c.showPage()
    
    # Draw a payslip for each employee
    for ansattPeriode in periode.ansatte:
        draw_slip(c, ansattPeriode)
        if not periode.finalized:
            draw_kladd(c)
        c.showPage()
    
    # Close the PDF object cleanly, and we're done.
    c.save()
    pdf = buffer.getvalue()
    buffer.close()
    return pdf
Example #14
File: ipg.py Project: Cadair/ginga
def showplt():
    buf = BytesIO()
    plt.savefig(buf, bbox_inches=0)
    img = Image(data=bytes(buf.getvalue()),
                   format='png', embed=True)
    buf.close()
    return img
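A usage sketch in a Jupyter/IPython session, assuming plt is matplotlib.pyplot and Image is IPython.display.Image, as the surrounding module's imports suggest:

import matplotlib.pyplot as plt

plt.plot([1, 2, 3], [4, 1, 5])
img = showplt()  # PNG bytes from the figure, wrapped in an IPython Image
img              # as the last expression in a cell, this renders inline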
Example #15
def generar_pdf_Producto(request):
    response = HttpResponse(content_type='application/pdf')
    pdf_name = "productos.pdf" 
    buff = BytesIO()
    doc = SimpleDocTemplate(buff,
                            pagesize=letter,
                            rightMargin=40,
                            leftMargin=40,
                            topMargin=60,
                            bottomMargin=18,
                            )
    productos = []
    styles = getSampleStyleSheet()
    header = Paragraph("Listado de Productos", styles['Heading1'])
    productos.append(header)
    headings = ('Proveedor','Nombre', 'Descripcion', 'Marca', 'Precio','Stock Actual')
    allproductos = [(p.prov_id, p.pro_nom, p.pro_des, p.pro_mar, p.pro_pre, p.pro_sto_act) for p in Producto.objects.all()]

    t = Table([headings] + allproductos)
    t.setStyle(TableStyle(
        [
            ('GRID', (0, 0), (-1, -1), 1, colors.dodgerblue),
            ('LINEBELOW', (0, 0), (-1, 0), 2, colors.darkblue),
            ('BACKGROUND', (0, 0), (-1, 0), colors.dodgerblue)
        ]
    ))
    
    productos.append(t)
    doc.build(productos)
    response.write(buff.getvalue())
    buff.close()
    return response
Example #16
    def generate_pdf_report(self, report_id, recipient):
        # setup
        report_buffer = BytesIO()
        doc = SimpleDocTemplate(
            report_buffer,
            pagesize=letter,
            rightMargin=72, leftMargin=72,
            topMargin=72, bottomMargin=72,
        )

        # content fill
        self.pdf_elements.extend(
            api.NotificationApi.get_cover_page(
                report_id=report_id,
                recipient=recipient,
            ),
        )
        self.pdf_elements.extend(self.report_page(self.report))
        self.pdf_elements.append(
            Paragraph(
                "Report Questions",
                self.section_title_style))
        self.render_questions(self.report_data)

        # teardown
        doc.build(
            self.pdf_elements,
            onFirstPage=self.get_header_footer(recipient),
            onLaterPages=self.get_header_footer(recipient),
            canvasmaker=NumberedCanvas,
        )
        result = report_buffer.getvalue()
        report_buffer.close()
        return result
Example #17
    def generate(cls, pdf_input_data: dict):
        # setup
        self = cls()
        reports = pdf_input_data.get('reports', [])
        matches = pdf_input_data.get('matches', [])
        report_buffer = BytesIO()
        doc = SimpleDocTemplate(
            report_buffer,
            pagesize=letter,
            rightMargin=72, leftMargin=72,
            topMargin=72, bottomMargin=72,
        )

        # content fill
        self.pdf_elements.extend(self.cover_page())
        self.pdf_elements.extend(self.report_pages(reports))
        self.pdf_elements.extend(self.match_pages_empty_identifier(matches))

        # teardown
        doc.build(
            self.pdf_elements,
            canvasmaker=NumberedCanvas,
        )
        result = report_buffer.getvalue()
        report_buffer.close()
        return result
Example #18
def generar_pdf(request):
    print ("Genero el PDF");
    response = HttpResponse(content_type='application/pdf')
    pdf_name = "proveedores.pdf"  # llamado clientes
    # la linea 26 es por si deseas descargar el pdf a tu computadora
    # response['Content-Disposition'] = 'attachment; filename=%s' % pdf_name
    buff = BytesIO()
    doc = SimpleDocTemplate(buff,
                            pagesize=letter,
                            rightMargin=40,
                            leftMargin=40,
                            topMargin=60,
                            bottomMargin=18,
                            )
    proveedores = []
    styles = getSampleStyleSheet()
    header = Paragraph("Listado de Proveedores", styles['Heading1'])
    proveedores.append(header)
    headings = ('No. Proveedor','Nombre','RFC','Giro','Direccion','Ciudad','Estado','Pais','Telefono','Correo','Comentario')
    allproveedores = [(p.num_proveedor, p.nombre, p.RFC ,p.giro ,p.direccion ,p.ciudad ,p.estado ,p.pais ,p.telefono ,p.correo ,p.comentario) for p in Proveedor.objects.all()]
    print(allproveedores)

    t = Table([headings] + allproveedores)
    t.setStyle(TableStyle(
        [
            ('GRID', (0, 0), (-1, -1), 1, colors.dodgerblue),
            ('LINEBELOW', (0, 0), (-1, 0), 2, colors.darkblue),
            ('BACKGROUND', (0, 0), (-1, 0), colors.dodgerblue)
        ]
    ))
    proveedores.append(t)
    doc.build(proveedores)
    response.write(buff.getvalue())
    buff.close()
    return response
Example #19
 def _fetch_year(self,year):
     if len(self.station_id) == 6:
         # given station id is the six digit code, so need to get full name
         with resource_stream('eemeter.resources','GSOD-ISD_station_index.json') as f:
             station_index = json.loads(f.read().decode("utf-8"))
         # take first station in list
         potential_station_ids = station_index[self.station_id]
     else:
         # otherwise, just use the given id
         potential_station_ids = [self.station_id]
     ftp = ftplib.FTP("ftp.ncdc.noaa.gov")
     ftp.login()
     string = BytesIO()
     # not every station will be available in every year, so use the
     # first one that works
     for station_id in potential_station_ids:
         try:
             ftp.retrbinary('RETR /pub/data/noaa/{year}/{station_id}-{year}.gz'.format(station_id=station_id,year=year),string.write)
             break
         except (IOError,ftplib.error_perm):
             pass
     string.seek(0)
     f = gzip.GzipFile(fileobj=string)
     self._add_file(f)
     string.close()
     f.close()
     ftp.quit()
Example #20
def generate_certificate(description_of_items,cost_of_items,amount,cost,qty,raise_for,request ):

    buffer = BytesIO()
    styleSheet = getSampleStyleSheet()
    style = styleSheet['Normal']
    canv = Canvas(buffer)  # render into the in-memory buffer, not a file on disk
    canv.setFillColorRGB(0, 0, 1)  # ReportLab expects color components in the 0-1 range
    canv.setFont('Helvetica-Bold', 44, leading=None)
    canv.drawCentredString(102, 800, "INVOICE")
    canv.setFont('Helvetica-Bold', 8, leading=None)
    #canv.drawCentredString(38, 824, "From:")
    b = Company_credentials.objects.get(user=request.user)
    canv.setFillColorRGB(0, 0, 1)
    canv.drawCentredString(480, 826, b.company_name)
    canv.drawCentredString(480, 813, b.email)
    canv.drawCentredString(480, 801, b.country + ',' + b.phone_number)
    #canv.drawCentredString(480, 790, b.email)
    canv.setFillColorRGB(0, 0, 0)

    canv.drawCentredString(480, 790, "Raised on:" + str(datetime.date.today()) )
    canv.line(0, 785, 800, 785)
    canv.setFont('Helvetica', 21, leading=None)
    canv.setFillColorRGB(0, 0, 1)
    canv.drawCentredString(68, 760, "Description:")
    canv.setFillColorRGB(0, 0, 0)
    canv.setFont('Helvetica-Bold', 14, leading=None)
    canv.drawCentredString(120, 730, "ITEMS")
    canv.drawCentredString(320, 730, "RATE")
    canv.drawCentredString(410, 730, "QTY")
    canv.drawCentredString(500, 730, "AMOUNT")
    canv.setFont('Helvetica', 8, leading=None)
    y_coordinate = 710
    length = len(description_of_items)
    for chaska in range(length):
        canv.drawCentredString(120, y_coordinate,description_of_items[chaska])
        canv.drawCentredString(320, y_coordinate, str(cost_of_items[chaska]))
        canv.drawCentredString(410, y_coordinate, str(qty[chaska]))
        canv.drawCentredString(500, y_coordinate, '$' + str(amount[chaska]))
        y_coordinate = y_coordinate - 15
    y_coordinate = y_coordinate - 25
    canv.line(310, y_coordinate, 580, y_coordinate)
    canv.setFont('Helvetica-Bold', 12, leading=None)
    canv.drawCentredString(410, y_coordinate-16, "Total")
    canv.drawCentredString(500, y_coordinate-16, '$' + str(cost))
    canv.setFillColorRGB(0, 0, 1)
    canv.setFont('Helvetica', 16, leading=None)
    canv.drawCentredString(55, y_coordinate-16, "Raised For:")
    canv.setFillColorRGB(0, 0, 0)
    P = Paragraph(raise_for, style)
    aW = 180
    aH = y_coordinate-46
    w, h = P.wrap(aW, aH)  # find required space
    if w <= aW and h <= aH:
        P.drawOn(canv, 12, aH)
        aH = aH - h  # reduce the available height
    canv.save()
    pdf = buffer.getvalue()
    buffer.close()
    return pdf
Example #21
 def export(self):
     """Write the settings objects to a string"""
     f = BytesIO()
     self.settings.write(f)
     contents = f.getvalue()
     f.close()
     return contents
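A self-contained sketch of the same export pattern using the standard library's configparser; note that ConfigParser.write expects a text stream, so this variant uses StringIO (the settings object in the original evidently writes bytes):

from configparser import ConfigParser
from io import StringIO

settings = ConfigParser()
settings['server'] = {'host': 'localhost', 'port': '8080'}

f = StringIO()  # ConfigParser.write wants str, not bytes
settings.write(f)
contents = f.getvalue()
f.close()
print(contents)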
Example #22
    def post(self):
        self.state.clear()

        content = list(self.request.files.values())[0][0]["body"]
        bio = BytesIO(content)
        self.state.load_flows(FlowReader(bio).stream())
        bio.close()
Example #23
def generate_pdf(request, site):

    reportPdfUrl = 'http://%s/report/view/%s' % (request.META['HTTP_HOST'],str(site.pk))

    outputStream = BytesIO()
    reportPdfFile = '%s/download_%s.pdf' % (settings.DOWNLOAD_ROOT, site.pk)

    wkhtmltopdfBinLocationString = '/usr/local/bin/wkhtmltopdf'
    wkhtmltopdfBinLocationBytes = wkhtmltopdfBinLocationString.encode('utf-8')
    config = pdfkit.configuration(wkhtmltopdf=wkhtmltopdfBinLocationBytes)

    pdfkit.from_url(reportPdfUrl, reportPdfFile, configuration=config, options={
        'javascript-delay': 1500,
        'load-error-handling': 'ignore'
    })

    new_pdf = PdfFileMerger()
    new_pdf.append('%s/download_%s.pdf' % (settings.DOWNLOAD_ROOT, site.pk))

    # finally, return output
    new_pdf.write(outputStream)

    final = outputStream.getvalue()
    outputStream.close()
    os.remove(reportPdfFile)

    return final
Example #24
def loads(b):
    '''Read a RBDEF object from the byte string ``b``.'''
    of = BytesIO(b)
    try:
        return load(of)
    finally:
        of.close()
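A usage sketch; the .rbdef file name is just an illustrative placeholder:

# Read raw bytes from disk, then parse them with loads().
with open('example.rbdef', 'rb') as f:  # placeholder path
    obj = loads(f.read())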
Example #25
 def do_POST(self):
     """Serve a POST request."""
     r, info = self.deal_post_data()
     print((r, info, "by: ", self.client_address))
     f = BytesIO()
     f.write(b'<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
     f.write(b"<html>\n<title>Upload Result Page</title>\n")
     f.write(b"<body>\n<h2>Upload Result Page</h2>\n")
     f.write(b"<hr>\n")
     if r:
         f.write(b"<strong>Success:</strong>")
     else:
         f.write(b"<strong>Failed:</strong>")
     f.write(info.encode())
     f.write(("<br><a href=\"%s\">back</a>" % self.headers['referer']).encode())
     f.write(b"<hr><small>Powerd By: bones7456, check new version at ")
     f.write(b"<a href=\"http://li2z.cn/?s=SimpleHTTPServerWithUpload\">")
     f.write(b"here</a>.</small></body>\n</html>\n")
     length = f.tell()
     f.seek(0)
     self.send_response(200)
     self.send_header("Content-type", "text/html")
     self.send_header("Content-Length", str(length))
     self.end_headers()
     if f:
         self.copyfile(f, self.wfile)
         f.close()
Example #26
def pdf_view(request):
	html_content = "<h1>***Mensaje***</h1>"
	
	response = HttpResponse(content_type='application/pdf')
	response['Content-Disposition'] = 'attachment; filename="somefilename.pdf"'
	buffer = BytesIO()
	# Create the PDF object, using the BytesIO object as its "file."
	p = canvas.Canvas(buffer,pagesize=A4)
	
	#Header
	#p.setLinewidth(25)
	p.setFont('Helvetica',22)
	p.drawString(30,750,'Atom')
	
	p.setFont('Helvetica',12)
	p.drawString(30,735,'Reporte')

	p.setFont('Helvetica-Bold',12)
	p.drawString(480,750,"23/04/2016")

	p.line(460,747,560,747)
	# Close the PDF object cleanly.
	p.showPage()
	p.save()


	# Get the value of the BytesIO buffer and write it to the response.
	pdf = buffer.getvalue()
	buffer.close()
	response.write(pdf)
	return response
Example #27
def limit_featurecollection(content, limit=200):
    """
    Parse a WFS FeatureCollection XML string and produce a
    similar string with at most `limit` features (200 by default).
    """

    parser = make_parser()

    _input = BytesIO(content)

    input_source = InputSource()
    input_source.setByteStream(_input)

    output = StringIO()
    downstream = XMLGenerator(output, 'utf-8')

    _filter = _XMLFilterLimit(parser, downstream, limit=limit)
    _filter.parse(input_source)

    result = output.getvalue()

    _input.close()
    output.close()

    return result
Example #28
def get_cadc_headers(uri):
    """
    Creates the FITS headers object by fetching the FITS headers of a CADC
    file. The function takes advantage of the fhead feature of the CADC
    storage service and retrieves just the headers and no data, minimizing
    the transfer time.

    The file must be public, because the header retrieval is done as an
    anonymous user.

    :param uri: CADC URI
    :return: a string of keyword/value pairs.
    """
    file_url = parse.urlparse(uri)
    # create possible types of subjects
    subject = net.Subject()
    client = CadcDataClient(subject)
    # do a fhead on the file
    archive, file_id = file_url.path.split('/')
    b = BytesIO()
    b.name = uri
    client.get_file(archive, file_id, b, fhead=True)
    fits_header = b.getvalue().decode('ascii')
    b.close()
    return fits_header
Example #29
def generar_pdf_Usuario(request):
    response = HttpResponse(content_type='application/pdf')
    pdf_name = "usuarios.pdf"
    buff = BytesIO()
    doc = SimpleDocTemplate(buff,
                            pagesize=letter,
                            rightMargin=40,
                            leftMargin=40,
                            topMargin=60,
                            bottomMargin=18,
                            )
    usuarios = []
    styles = getSampleStyleSheet()
    header = Paragraph("Listado de Usuarios", styles['Heading1'])
    usuarios.append(header)
    headings = ('Cedula', 'Nombres', 'Apellidos', 'Sexo','Direccion', 'Telefono', 'Email')
    allusuarios = [(p.usu_ced, p.usu_nom, p.usu_ape, p.usu_sex, p.usu_dir, p.usu_tel, p.usu_ema) for p in Usuario.objects.all()]

    t = Table([headings] + allusuarios)
    t.setStyle(TableStyle(
        [
            ('GRID', (0, 0), (6, -1), 1, colors.dodgerblue),
            ('LINEBELOW', (0, 0), (-1, 0), 2, colors.darkblue),
            ('BACKGROUND', (0, 0), (-1, 0), colors.dodgerblue)
        ]
    ))

    usuarios.append(t)
    doc.build(usuarios)
    response.write(buff.getvalue())
    buff.close()
    return response
Example #30
 def replace(self, text, rect_elt):
   if not text.startswith('#code128'):
     return
   cmd = Command(text)
   
   attrs = elt_attrs_to_dict(rect_elt, ['x', 'y', 'height', 'width'])
   attrs['preserveAspectRatio'] = cmd.get_kw_arg('align', 'rescale alignment', 'xMidYMid')
   quiet = cmd.get_kw_arg('quiet', 'add quiet zone', default='True')
   if quiet in ['true', 'True']:
     quiet = True
   elif quiet in ['False', 'false']:
     quiet = False
   else:
     raise CommandSyntaxError("quiet='%s' not a bool" % quiet)
   thickness = int(cmd.get_kw_arg('thickness', 'barcode thickness', 3))
   val = cmd.get_pos_arg(0, 'barcode value')
   cmd.finalize()
   
   image = Code128.code128_image(val, thickness=thickness, quiet_zone=quiet)
   image_output = BytesIO()
   image.save(image_output, format='PNG')
   image_base64 = base64.b64encode(image_output.getvalue())
   image_output.close()
   data_string = "data:image/png;base64," + image_base64.decode("utf-8") 
   attrs['{http://www.w3.org/1999/xlink}href'] = data_string
   
   image_elt = ET.Element('image', attrs)
   
   return [image_elt]
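The same BytesIO-to-data-URI pattern works for any PIL image; a minimal self-contained sketch using Pillow directly:

import base64
from io import BytesIO

from PIL import Image

image = Image.new('RGB', (120, 40), color='white')  # stand-in for a rendered barcode
buffer = BytesIO()
image.save(buffer, format='PNG')  # encode the image as PNG into memory
payload = base64.b64encode(buffer.getvalue()).decode('ascii')
buffer.close()
data_uri = 'data:image/png;base64,' + payload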
Example #31
def retreive_compound_sdf_pubchem(searchproperty,searchvalue,outputfile=None,in3D=False):
    URL = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/'
    errdata = dict()
    data = None
    do_not_skip_on_debug = False
    SIZE_LIMIT = 52428800
    RECEIVE_TIMEOUT = 120
    args = []
    try:
        args.append(str(searchproperty))
        args.append(urllib.parse.quote(str(searchvalue),safe=''))
        if in3D:
            recordtype='3d'
        else:
            recordtype='2d'
        
        response = requests.get(URL+'/'.join(args)+'/SDF?record_type='+recordtype,timeout=30,stream=True,verify=True)
        response.raise_for_status()
        if outputfile:
            fileh = open(outputfile,'w+b')
        else:
            fileh = BytesIO(b'')
        size = 0
        start = time.time()
        chunks = response.iter_content(chunk_size=524288)
        for chunk in chunks:
            size += len(chunk)
            if size > SIZE_LIMIT:
                raise StreamSizeLimitError('response too large')
            if time.time() - start > RECEIVE_TIMEOUT:
                raise StreamTimeoutError('timeout reached')
            fileh.write(chunk)

        response.close()
        fileh.seek(0)
        if not outputfile:
            data = fileh.read()
            fileh.seek(0)  # rewind again so the parser below reads from the start
        mol = open_molecule_file(fileh, filetype='sdf')
        del mol

        return (data, errdata)
    except HTTPError:
        errdata['Error'] = True
        errdata['ErrorType'] = 'HTTPError'
        errdata['status_code'] = response.status_code
        errdata['reason'] = response.reason
    except ConnectionError as e:
        errdata['Error'] = True
        errdata['ErrorType'] = 'ConnectionError'
        errdata['reason'] = 'Cannot connect.'
    except Timeout as e:
        errdata['Error'] = True
        errdata['ErrorType'] = 'Timeout'
        errdata['reason'] = 'Timeout exceeded.'
    except TooManyRedirects as e:
        errdata['Error'] = True
        errdata['ErrorType'] = 'TooManyRedirects'
        errdata['reason'] = 'Too many redirects.'
    except StreamSizeLimitError as e:
        errdata['Error'] = True
        errdata['ErrorType'] = 'StreamSizeLimitError'
        errdata['reason'] = str(e)
    except StreamTimeoutError as e:
        errdata['Error'] = True
        errdata['ErrorType'] = 'StreamTimeoutError'
        errdata['reason'] = str(e)
    except ParsingError as e:
        errdata['Error'] = True
        errdata['ErrorType'] = 'ParsingError'
        errdata['reason'] = str(e)
    except:
        errdata['Error'] = True
        errdata['ErrorType'] = 'Internal'
        errdata['reason'] = ''
        do_not_skip_on_debug = True
        raise
    finally:
        try:
            response.close()
        except:
            pass
        try:
            fileh.close()
        except:
            pass
        if not (settings.DEBUG and do_not_skip_on_debug):
            return (data, errdata)
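The snippet relies on StreamSizeLimitError and StreamTimeoutError, which are presumably defined elsewhere in the project; a minimal sketch of what they would look like:

class StreamSizeLimitError(Exception):
    """Raised when a streamed download exceeds the allowed size."""

class StreamTimeoutError(Exception):
    """Raised when receiving a streamed download takes too long."""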
Example #32
        def do_POST(self):
            """Serve a POST request."""
            # First, we save the post data
            r, info = self.deal_post_data()
            print((r, info, "by: ", self.client_address))

            # And write the response web page
            f = BytesIO()
            f.write(
                b"<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 3.2 Final//EN\"><html>"
            )
            f.write(b"<title>qr-filetransfer</title>")
            f.write(
                b"<meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">"
            )
            f.write(
                b"<link href=\"https://fonts.googleapis.com/css?family=Comfortaa\" rel=\"stylesheet\">"
            )
            f.write(
                b"<link rel=\"icon\" href=\"https://raw.githubusercontent.com/sdushantha/qr-filetransfer/master/logo.png\" type=\"image/png\">"
            )
            f.write(b"<center>")
            f.write(b"<body>")
            f.write(
                b"<h2 style=\"font-family: 'Comfortaa', cursive;color:'#263238';\">Upload Result Page</h2>"
            )
            f.write(b"<hr>")

            if r:
                f.write(
                    b"<strong style=\"font-family: 'Comfortaa', cursive;color:'#263238';\">Success: </strong>"
                )
            else:
                f.write(
                    b"<strong style=\"font-family: 'Comfortaa', cursive;color:'#263238';\">Failed: </strong>"
                )

            f.write((
                "<span style=\"font-family: 'Comfortaa', cursive;color:'#263238';\">%s</span><br>"
                % info).encode())
            f.write((
                "<br><a href=\"%s\" style=\"font-family: 'Comfortaa', cursive;color:'#263238';\">back</a>"
                % self.headers['referer']).encode())
            f.write(
                b"<hr><small style=\"font-family: 'Comfortaa', cursive;color:'#263238';\">Powerd By: "
            )
            f.write(b"<a href=\"https://github.com/sdushantha/\">")
            f.write(b"sdushantha</a> and \n")
            f.write(b"<a href=\"https://github.com/npes87184/\">")
            f.write(b"npes87184</a>, check new version at \n")
            f.write(b"<a href=\"https://pypi.org/project/qr-filetransfer/\">")
            f.write(b"here</a>.</small></body>\n</html>\n")
            length = f.tell()
            f.seek(0)
            self.send_response(200)
            self.send_header("Content-type", "text/html; charset=utf-8")
            self.send_header("Content-Length", str(length))
            self.end_headers()
            if f:
                self.copyfile(f, self.wfile)
                f.close()
Example #33
    def get_code(self):
        """Actually download the code.

        Following the standard order of execution, this is the first method
        that might actually modify the system (by downloading code).

        Raises
        ------
        DesiInstallException
            If any download errors are detected.
        """
        self.working_dir = os.path.join(os.path.abspath('.'),
                                        '{0}-{1}'.format(self.baseproduct,
                                                         self.baseversion))
        if os.path.isdir(self.working_dir):
            self.log.info("Detected old working directory, %s. Deleting...",
                          self.working_dir)
            self.log.debug("shutil.rmtree('%s')", self.working_dir)
            if not self.options.test:
                shutil.rmtree(self.working_dir)
        if self.github:
            if self.is_trunk or self.is_branch:
                if self.is_branch:
                    try:
                        r = requests.get(os.path.join(self.fullproduct, 'tree',
                                                      self.baseversion))
                        r.raise_for_status()
                    except requests.exceptions.HTTPError:
                        message = ("Branch {0} does not appear to exist. " +
                                   "HTTP response was {1:d}.").format(
                                   self.baseversion, r.status_code)
                        self.log.critical(message)
                        raise DesiInstallException(message)
                command = ['git', 'clone', '-q', self.product_url,
                           self.working_dir]
                self.log.debug(' '.join(command))
                if self.options.test:
                    out, err = 'Test Mode.', ''
                else:
                    proc = Popen(command, universal_newlines=True,
                                 stdout=PIPE, stderr=PIPE)
                    out, err = proc.communicate()
                self.log.debug(out)
                if len(err) > 0:
                    message = ("git error while downloading product code: " +
                               err)
                    self.log.critical(message)
                    raise DesiInstallException(message)
                if self.is_branch:
                    original_dir = os.getcwd()
                    self.log.debug("os.chdir('%s')", self.working_dir)
                    if not self.options.test:
                        os.chdir(self.working_dir)
                    command = ['git', 'checkout', '-q', '-b', self.baseversion,
                               'origin/'+self.baseversion]
                    self.log.debug(' '.join(command))
                    if self.options.test:
                        out, err = 'Test Mode.', ''
                    else:
                        proc = Popen(command, universal_newlines=True,
                                     stdout=PIPE, stderr=PIPE)
                        out, err = proc.communicate()
                    self.log.debug(out)
                    if len(err) > 0:
                        message = ("git error while changing branch:" +
                                   " {0}".format(err))
                        self.log.critical(message)
                        raise DesiInstallException(message)
                    self.log.debug("os.chdir('%s')", original_dir)
                    if not self.options.test:
                        os.chdir(original_dir)
            else:
                if self.options.test:
                    self.log.debug("Test Mode. Skipping download of %s.",
                                   self.product_url)
                else:
                    try:
                        r = requests.get(self.product_url)
                        r.raise_for_status()
                    except requests.exceptions.HTTPError:
                        message = ("Error while downloading {0}, " +
                                   "HTTP response was {1:d}.").format(
                                   self.product_url, r.status_code)
                        self.log.critical(message)
                        raise DesiInstallException(message)
                    try:
                        tgz = BytesIO(r.content)  # r.content is bytes, so wrap it in BytesIO
                        tf = tarfile.open(fileobj=tgz, mode='r:gz')
                        tf.extractall()
                        tf.close()
                        tgz.close()
                        self.working_dir = os.path.join(os.path.abspath('.'),
                                                        '{0}-{1}'.format(self.baseproduct,
                                                                         self.baseversion))
                        if self.baseversion.startswith('v'):
                            nov = os.path.join(os.path.abspath('.'),
                                               '{0}-{1}'.format(self.baseproduct,
                                                                self.baseversion[1:]))
                            if os.path.exists(nov):
                                self.working_dir = nov
                    except tarfile.TarError as e:
                        message = "tar error while expanding product code!"
                        self.log.critical(message)
                        raise DesiInstallException(message)
        else:
            if self.is_trunk or self.is_branch:
                get_svn = 'checkout'
            else:
                get_svn = 'export'
            command = ['svn', '--non-interactive', '--username',
                       self.options.username, get_svn, self.product_url,
                       self.working_dir]
            self.log.debug(' '.join(command))
            if self.options.test:
                out, err = 'Test Mode.', ''
            else:
                proc = Popen(command, universal_newlines=True,
                             stdout=PIPE, stderr=PIPE)
                out, err = proc.communicate()
            self.log.debug(out)
            if len(err) > 0:
                message = ("svn error while downloading product " +
                           "code: {0}".format(err))
                self.log.critical(message)
                raise DesiInstallException(message)
        return
Example #34
def extractText(pdf_contents):
    pdf_file = BytesIO(pdf_contents)
    text = extract_text(pdf_file)
    pdf_file.close()
    return text
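A usage sketch, assuming extract_text is pdfminer.six's pdfminer.high_level.extract_text, which accepts file-like objects:

from pdfminer.high_level import extract_text  # assumed source of extract_text

with open('report.pdf', 'rb') as f:  # placeholder path
    text = extractText(f.read())
print(text[:200])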
Example #35
class SegmentBuffer:
    """Buffer for writing a sequence of packets to the output as a segment."""
    def __init__(
        self,
        hass: HomeAssistant,
        outputs_callback: Callable[[], Mapping[str, StreamOutput]],
    ) -> None:
        """Initialize SegmentBuffer."""
        self._stream_id: int = 0
        self._hass = hass
        self._outputs_callback: Callable[[], Mapping[
            str, StreamOutput]] = outputs_callback
        # sequence gets incremented before the first segment so the first segment
        # has a sequence number of 0.
        self._sequence = -1
        self._segment_start_dts: int = cast(int, None)
        self._memory_file: BytesIO = cast(BytesIO, None)
        self._av_output: av.container.OutputContainer = None
        self._input_video_stream: av.video.VideoStream = None
        self._input_audio_stream: av.audio.stream.AudioStream | None = None
        self._output_video_stream: av.video.VideoStream = None
        self._output_audio_stream: av.audio.stream.AudioStream | None = None
        self._segment: Segment | None = None
        # the following 3 member variables are used for Part formation
        self._memory_file_pos: int = cast(int, None)
        self._part_start_dts: int = cast(int, None)
        self._part_has_keyframe = False
        self._stream_settings: StreamSettings = hass.data[DOMAIN][
            ATTR_SETTINGS]
        self._start_time = datetime.datetime.utcnow()

    def make_new_av(
        self,
        memory_file: BytesIO,
        sequence: int,
        input_vstream: av.video.VideoStream,
    ) -> av.container.OutputContainer:
        """Make a new av OutputContainer."""
        return av.open(
            memory_file,
            mode="w",
            format=SEGMENT_CONTAINER_FORMAT,
            container_options={
                **{
                    # Removed skip_sidx - see https://github.com/home-assistant/core/pull/39970
                    # "cmaf" flag replaces several of the movflags used, but too recent to use for now
                    "movflags": "frag_custom+empty_moov+default_base_moof+frag_discont+negative_cts_offsets+skip_trailer",
                    # Sometimes the first segment begins with negative timestamps, and this setting just
                    # adjusts the timestamps in the output from that segment to start from 0. Helps from
                    # having to make some adjustments in test_durations
                    "avoid_negative_ts": "make_non_negative",
                    "fragment_index": str(sequence + 1),
                    "video_track_timescale": str(
                        int(1 / input_vstream.time_base)),
                },
                # Only do extra fragmenting if we are using ll_hls
                # Let ffmpeg do the work using frag_duration
                # Fragment durations may exceed the 15% allowed variance but it seems ok
                **({
                    "movflags":
                    "empty_moov+default_base_moof+frag_discont+negative_cts_offsets+skip_trailer",
                    # Create a fragment every TARGET_PART_DURATION. The data from each fragment is stored in
                    # a "Part" that can be combined with the data from all the other "Part"s, plus an init
                    # section, to reconstitute the data in a "Segment".
                    # frag_duration seems to be a minimum threshold for determining part boundaries, so some
                    # parts may have a higher duration. Since Part Target Duration is used in LL-HLS as a
                    # maximum threshold for part durations, we scale that number down here by .85 and hope
                    # that the output part durations stay below the maximum Part Target Duration threshold.
                    # See https://datatracker.ietf.org/doc/html/draft-pantos-hls-rfc8216bis#section-4.4.4.9
                    "frag_duration":
                    str(self._stream_settings.part_target_duration * 1e6),
                } if self._stream_settings.ll_hls else {}),
            },
        )

    def set_streams(
        self,
        video_stream: av.video.VideoStream,
        audio_stream: Any,
        # no type hint for audio_stream until https://github.com/PyAV-Org/PyAV/pull/775 is merged
    ) -> None:
        """Initialize output buffer with streams from container."""
        self._input_video_stream = video_stream
        self._input_audio_stream = audio_stream

    def reset(self, video_dts: int) -> None:
        """Initialize a new stream segment."""
        # Keep track of the number of segments we've processed
        self._sequence += 1
        self._segment_start_dts = video_dts
        self._segment = None
        self._memory_file = BytesIO()
        self._memory_file_pos = 0
        self._av_output = self.make_new_av(
            memory_file=self._memory_file,
            sequence=self._sequence,
            input_vstream=self._input_video_stream,
        )
        self._output_video_stream = self._av_output.add_stream(
            template=self._input_video_stream)
        # Check if audio is requested
        self._output_audio_stream = None
        if self._input_audio_stream and self._input_audio_stream.name in AUDIO_CODECS:
            self._output_audio_stream = self._av_output.add_stream(
                template=self._input_audio_stream)

    def mux_packet(self, packet: av.Packet) -> None:
        """Mux a packet to the appropriate output stream."""

        # Check for end of segment
        if packet.stream == self._input_video_stream:
            if (packet.is_keyframe and
                (packet.dts - self._segment_start_dts) * packet.time_base >=
                    self._stream_settings.min_segment_duration):
                # Flush segment (also flushes the stub part segment)
                self.flush(packet, last_part=True)
                # Reinitialize
                self.reset(packet.dts)

            # Mux the packet
            packet.stream = self._output_video_stream
            self._av_output.mux(packet)
            self.check_flush_part(packet)
            self._part_has_keyframe |= packet.is_keyframe

        elif packet.stream == self._input_audio_stream:
            packet.stream = self._output_audio_stream
            self._av_output.mux(packet)

    def check_flush_part(self, packet: av.Packet) -> None:
        """Check for and mark a part segment boundary and record its duration."""
        if self._memory_file_pos == self._memory_file.tell():
            return
        if self._segment is None:
            # We have our first non-zero byte position. This means the init has just
            # been written. Create a Segment and put it to the queue of each output.
            self._segment = Segment(
                sequence=self._sequence,
                stream_id=self._stream_id,
                init=self._memory_file.getvalue(),
                # Fetch the latest StreamOutputs, which may have changed since the
                # worker started.
                stream_outputs=self._outputs_callback().values(),
                start_time=self._start_time + datetime.timedelta(
                    seconds=float(self._segment_start_dts * packet.time_base)),
            )
            self._memory_file_pos = self._memory_file.tell()
            self._part_start_dts = self._segment_start_dts
        else:  # These are the ends of the part segments
            self.flush(packet, last_part=False)

    def flush(self, packet: av.Packet, last_part: bool) -> None:
        """Output a part from the most recent bytes in the memory_file.

        If last_part is True, also close the segment, give it a duration,
        and clean up the av_output and memory_file.
        """
        # In some cases using the current packet's dts (which is the start
        # dts of the next part) to calculate the part duration will result in a
        # value which exceeds the part_target_duration. This can muck up the
        # duration of both this part and the next part. An easy fix is to just
        # use the current packet dts and cap it by the part target duration.
        current_dts = min(
            packet.dts,
            self._part_start_dts +
            self._stream_settings.part_target_duration / packet.time_base,
        )
        if last_part:
            # Closing the av_output will write the remaining buffered data to the
            # memory_file as a new moof/mdat.
            self._av_output.close()
        assert self._segment
        self._memory_file.seek(self._memory_file_pos)
        self._hass.loop.call_soon_threadsafe(
            self._segment.async_add_part,
            Part(
                duration=float(
                    (current_dts - self._part_start_dts) * packet.time_base),
                has_keyframe=self._part_has_keyframe,
                data=self._memory_file.read(),
            ),
            float((current_dts - self._segment_start_dts) *
                  packet.time_base) if last_part else 0,
        )
        if last_part:
            # If we've written the last part, we can close the memory_file.
            self._memory_file.close()  # We don't need the BytesIO object anymore
        else:
            # For the last part, these will get set again elsewhere so we can skip
            # setting them here.
            self._memory_file_pos = self._memory_file.tell()
            self._part_start_dts = current_dts
        self._part_has_keyframe = False

    def discontinuity(self) -> None:
        """Mark the stream as having been restarted."""
        # Preserving sequence and stream_id here keep the HLS playlist logic
        # simple to check for discontinuity at output time, and to determine
        # the discontinuity sequence number.
        self._stream_id += 1

    def close(self) -> None:
        """Close stream buffer."""
        self._av_output.close()
        self._memory_file.close()
Example #36
def pdf(request, id=None):
    if not request.user.is_authenticated():
        raise Http404
    instance = get_object_or_404(Emprendedor, numero=id)
    if (instance):
        expediente_listado = Expediente.objects.all().filter(
            emprendedor=instance).order_by("fecha")

        # start of the reportlab section
        response = HttpResponse(content_type='application/pdf')
        pdf_name = "expediente_%d.pdf" % instance.id
        response['Content-Disposition'] = 'filename=%s; pagesize=A4' % pdf_name
        buff = BytesIO()
        title = "Informe_%d" % instance.id

        Story = [Spacer(1, 0)]
        style = styles["textos"]

        def myFirstPage(canvas, doc):
            canvas.saveState()
            canvas.setFont('Ubuntu-R', 8)
            canvas.drawString(
                inch, A4[1] - 50,
                "Dirección de Desarrollo Local -  Secretaría de Producción y Ambiente."
            )
            canvas.line(inch, A4[1] - 60, A4[0] - 65, A4[1] - 60)
            # page numbering
            canvas.setFont('Ubuntu-R', 8)
            canvas.drawString(
                inch, 0.75 * inch,
                "Pagina %d / Río Grande, Tierra del Fuego" % (doc.page))
            canvas.restoreState()

        def cuerpo(canvas):
            titulo = Paragraph("REPORTE DEL EXPEDIENTE", tit1)
            Story.append(titulo)
            Story.append(Spacer(1, 30))

            persona = Paragraph(
                """<b>Nombre y Apellido: </b>""" + instance.apellido +
                """, """ + instance.nombre + """<br/><b>""" +
                instance.identificacion + """: </b>""" + instance.numero +
                """<br/><b>Direccion: </b>""" + instance.direccion +
                """<br/><b>Estado Civil: </b>""" + instance.estadocivil +
                """<br/><b>Profesión: </b>""" + instance.profesion,
                styles["Normal"])
            Story.append(persona)
            Story.append(Spacer(1, 12))

            for e in expediente_listado:
                if e.activo and e.asiento != "OBSERVACIONES":
                    if e.asiento == "OTORGAMIENTO: CREDITO" or e.asiento == "OTORGAMIENTO: SUBSIDIO":
                        subt = Paragraph(
                            """<b>FECHA: </b>""" + str(e.fecha) +
                            """, <b> SE INICIÓ UN ASIENTO DE: </b>""" +
                            e.asiento +
                            """<b> CON UN MONTO TOTAL DE: </b>$""" +
                            str(e.monto) +
                            """<b> Y SE REALIZÓ EL SIGUIENTE INFORME: </b>""",
                            styles["Normal"])
                        Story.append(subt)
                        Story.append(Spacer(1, 12))

                        tex = Paragraph(e.texto, style)
                        Story.append(tex)
                        Story.append(Spacer(1, 12))
                    else:
                        subt = Paragraph(
                            """<b>FECHA: </b>""" + str(e.fecha) +
                            """, <b> SE INICIÓ UN ASIENTO DE: </b>""" +
                            e.asiento +
                            """<b> Y SE REALIZÓ EL SIGUIENTE INFORME: </b>""",
                            styles["Normal"])
                        Story.append(subt)
                        Story.append(Spacer(1, 12))

                        tex = Paragraph(e.texto, style)
                        Story.append(tex)
                        Story.append(Spacer(1, 12))

            return Story

        Story = cuerpo(canvas)
        doc = SimpleDocTemplate(buff,
                                pagesize=A4,
                                rightMargin=72,
                                leftMargin=72,
                                topMargin=72,
                                bottomMargin=75)
        # Build the document from the arguments defined above
        doc.build(Story, onFirstPage=myFirstPage, onLaterPages=myFirstPage)
        response.write(buff.getvalue())
        buff.close()
        return response
Example #37
def listados_pdf(request, id=None):
    if not request.user.is_authenticated():
        raise Http404
    # Each branch of the original if/elif chain filtered Emprendedor on the
    # boolean field whose name matches the id, so build the filter dynamically.
    CAPACITACIONES = (
        'modeladodenegocios', 'ventas', 'marketing', 'disenodemarca',
        'marcacolectiva', 'rentabilidad', 'costos', 'entrenamientointensivo',
        'culturaemprendedora', 'desarrollodeproducto', 'plandenegocios',
        'innovacionycreatividad', 'liderazgo', 'trabajoenequipo',
        'comunicacionefectiva', 'concursosyconvocatorias',
        'tradicionalynotradicional', 'marketingdigital', 'redessociales',
        'tiendaonline', 'cooperativismo', 'asociativismo', 'personajuridica',
        'mujeremprendedora', 'trabajoydiscapacidad',
    )
    instance = []
    if id in CAPACITACIONES:
        instance = Emprendedor.objects.filter(**{id: False})

    if instance:
        response = HttpResponse(content_type='application/pdf')
        pdf_name = "Capacitaciones.pdf"
        response['Content-Disposition'] = 'inline; filename=%s' % pdf_name
        buff = BytesIO()
        title = "Capacitaciones"

        Story = [Spacer(1, 0)]
        style = styles["textos"]

        def myFirstPage(canvas, doc):
            canvas.saveState()
            # page number footer
            canvas.setFont('Ubuntu-R', 8)
            canvas.drawString(inch, 0.75 * inch, "Pagina %d" % (doc.page))
            canvas.restoreState()

        def cuerpo(canvas):
            libro = Paragraph("Capacitaciones", tit1)
            Story.append(libro)
            Story.append(Spacer(1, 30))

            headings = [
                "Apellido y Nombre", "Identificacion ", "Profesion",
                "Telefono", "Confirmacion"
            ]
            allregistros = []
            for r in instance:
                registro = []
                registro.append(r.apellido + ", " + r.nombre)
                registro.append(r.numero)
                registro.append(r.profesion)
                registro.append(r.telefono)
                registro.append("     ")

                allregistros.append(registro)

            t = Table([headings] + allregistros)
            t.setStyle(LIST_STYLE)
            Story.append(t)
            Story.append(Spacer(1, 12))
            return Story

        Story = cuerpo(canvas)
        doc = SimpleDocTemplate(buff,
                                pagesize=A4,
                                rightMargin=72,
                                leftMargin=72,
                                topMargin=72,
                                bottomMargin=75)
        # Build the document from the flowables and page callbacks defined above
        doc.build(Story, onFirstPage=myFirstPage, onLaterPages=myFirstPage)
        response.write(buff.getvalue())
        buff.close()
        return response
    else:
        raise Http404
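A note on the Content-Disposition header used above: it only chooses between displaying and downloading the file, and a page-size hint such as 'pagesize=landscape(A4)' is not a valid parameter of it. A sketch of the two standard forms (filename is illustrative):

from django.http import HttpResponse

response = HttpResponse(content_type='application/pdf')
# 'inline' opens the PDF in the browser; 'attachment' forces a download.
response['Content-Disposition'] = 'attachment; filename="Capacitaciones.pdf"'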
Example #38
0
class ReportAdHoc(object):
    def __init__(self, title, sub_title, detail, **kwargs):
        self.name = kwargs.get('name')
        self.receiver = kwargs.get('receiver')
        self.sender = kwargs.get('sender')
        self.equipment = kwargs.get('equipment')
        self.add_equipment = kwargs.get('add_equipment')

        self.title = title
        self.sub_title = sub_title
        self.detail = detail
        self.buffer = BytesIO()
        self.doc = SimpleDocTemplate(self.buffer, pagesize=A4, title='Report')
        self.layout = [Spacer(1, 0.5 * inch)]
        self.style = getSampleStyleSheet()
        self.image_path = path.join(settings.STATIC_ROOT, 'app', 'images', '')

        self.style.add(
            ParagraphStyle(name='sub_title',
                           parent=self.style['Normal'],
                           fontSize=12,
                           leading=16))

        self.style.add(
            ParagraphStyle(name='detail',
                           parent=self.style['Normal'],
                           leading=24))

        self.style.add(
            ParagraphStyle(name='equipment',
                           parent=self.style['Normal'],
                           fontSize=11,
                           leading=14))

        self.style.add(
            ParagraphStyle(name='signature_left',
                           parent=self.style['Normal'],
                           alignment=TA_LEFT,
                           fontSize=12))

        self.style.add(
            ParagraphStyle(name='signature_right',
                           parent=self.style['Normal'],
                           alignment=TA_RIGHT,
                           fontSize=12))
        self.style.add(
            ParagraphStyle(name='grow',
                           parent=self.style['Normal'],
                           alignment=TA_RIGHT,
                           fontSize=15))

    def restore_default_canvas(self, canvas):
        canvas.restoreState()
        canvas.saveState()

    def mix_canvas_paragraph(self, canvas, paragraph, **kwargs):
        doc = kwargs.get('doc', self.doc)
        position = kwargs.get('position')
        horizontal = doc.leftMargin
        paragraph.wrap(doc.width, doc.bottomMargin)
        if isinstance(position, tuple):
            horizontal = position[0]
            position = position[1]
        paragraph.drawOn(canvas, horizontal, position)

    def set_canvas_dynamic(self, top):
        len_detail = len(self.detail)
        if len_detail == 10:
            top = top - (len_detail / 50)
        elif len_detail > 10:
            top = top - (len_detail / 25)
        return top

    def create_header(self, canvas, doc):
        w, h = doc.pagesize
        width = 100
        height = 100
        canvas.saveState()
        canvas.drawImage(path.join('/' + self.image_path, 'NufarmLogo2.jpg'),
                         w - 20 - width,
                         h - 20 - height,
                         width=width,
                         height=height)
        canvas.restoreState()

    def create_title(self):
        self.layout.append(Spacer(0, 15))
        self.layout.append(Paragraph(self.title, self.style['Title']))

    def create_sub_title(self):
        self.layout.append(Paragraph(self.sub_title, self.style['sub_title']))

    def create_detail(self):
        self.layout.append(Spacer(0, 20))
        detail = Table([(key, ': {value}'.format(value=value))
                        for key, value in self.detail.items()],
                       hAlign='LEFT')
        print(detail.wrap(0, 0))
        self.layout.append(detail)

    def create_equipment(self):
        self.layout.append(Spacer(0, 20))
        self.layout.append(
            Paragraph('<b>Perlengkapan standar :</b>',
                      self.style['equipment']))
        equipment_list = self.equipment
        for equipment in equipment_list:
            self.layout.append(
                Paragraph(equipment,
                          self.style['equipment'],
                          bulletText=u'\u27a4'))

    def create_add_equipment(self):
        self.layout.append(Spacer(0, 20))
        self.layout.append(
            Paragraph('<b>Perlengkapan tambahan :</b>',
                      self.style['equipment']))
        add_equipment_list = self.add_equipment
        for add_equipment in add_equipment_list:
            self.layout.append(
                Paragraph(add_equipment,
                          self.style['equipment'],
                          bulletText=u'\u27a4'))

    def create_signature(self, canvas, doc, **kwargs):
        receiver = "Yang Menerima,"
        receiver = Paragraph(receiver, self.style['signature_left'])
        self.mix_canvas_paragraph(canvas=canvas,
                                  paragraph=receiver,
                                  position=3 * inch)

        receiver_name = kwargs.get('receiver_name')
        receiver_name = Paragraph(receiver_name, self.style['signature_left'])
        self.mix_canvas_paragraph(canvas=canvas,
                                  paragraph=receiver_name,
                                  position=1.8 * inch)

        sender = "Yang Menyerahkan,"
        sender = Paragraph(sender, self.style['signature_right'])
        self.mix_canvas_paragraph(canvas=canvas,
                                  paragraph=sender,
                                  position=3 * inch)

        sender_name = kwargs.get('sender_name')
        sender_name = Paragraph(sender_name, self.style['signature_right'])
        self.mix_canvas_paragraph(canvas=canvas,
                                  paragraph=sender_name,
                                  position=1.8 * inch)

    def create_footer(self, canvas, doc):
        canvas.saveState()
        text = "Demikian Berita Acara ini dibuat dengan sebenarnya"
        top = self.set_canvas_dynamic(4.1)
        canvas.drawString(inch, top * inch, text)
        tgl = "Jakarta, 14 Desember 2016"
        canvas.setFont('Helvetica-Bold', 13)
        top = self.set_canvas_dynamic(3.8)
        canvas.drawString(inch, top * inch, tgl)
        self.create_signature(
            canvas=canvas,
            doc=doc,
            receiver_name='<b>{receiver}</b>'.format(receiver=self.receiver),
            sender_name='<b>{sender}</b>'.format(sender=self.sender))
        grow = "<b><font color='green'>Grow a better tomorrow.</font></b>"
        grow = Paragraph(grow, self.style['grow'])
        self.mix_canvas_paragraph(canvas=canvas,
                                  paragraph=grow,
                                  position=((1.5 * inch), (0.5 * inch)))
        canvas.restoreState()

    def first_page(self, canvas, doc):
        self.create_header(canvas, doc)
        if doc.page > 1:
            return
        return self.create_footer(canvas, doc)

    def last_page(self, canvas, doc):
        if doc.page > 1:
            return self.create_footer(canvas, doc)
        return self.create_header(canvas, doc)

    def write_pdf_view(self):
        self.create_title()
        self.create_sub_title()
        self.create_detail()

        if self.equipment:
            self.create_equipment()

        if self.add_equipment:
            self.create_add_equipment()

        self.doc.build(self.layout,
                       onFirstPage=self.first_page,
                       onLaterPages=self.last_page)

        response = HttpResponse(content_type='application/pdf')
        response[
            'Content-Disposition'] = 'inline; filename="{file_name}.pdf"'.format(
                file_name=self.name)
        response.write(self.buffer.getvalue())
        self.buffer.close()
        return response
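A minimal sketch of how a Django view might drive ReportAdHoc (every argument value here is an illustrative assumption, not from the original code):

def report_view(request):
    report = ReportAdHoc(
        title='Berita Acara',                    # hypothetical title
        sub_title='Equipment handover report',   # hypothetical subtitle
        detail={'Date': '14 Desember 2016', 'Location': 'Jakarta'},
        name='handover-report',
        receiver='Budi',
        sender='Andi',
        equipment=['Laptop', 'Charger'],
        add_equipment=[],                        # falsy, so the section is skipped
    )
    return report.write_pdf_view()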
Example #39
0
    def __call__(
        self,
        fieldname=None,
        direction="thumbnail",
        height=None,
        width=None,
        scale=None,
        **parameters
    ):

        """Factory for image scales`.
        """
        orig_value = getattr(self.context, fieldname, None)
        if orig_value is None:
            return

        if height is None and width is None:
            dummy, format_ = orig_value.contentType.split("/", 1)
            return None, format_, (orig_value._width, orig_value._height)
        elif not parameters and height and width \
                and height == getattr(orig_value, "_height", None) \
                and width == getattr(orig_value, "_width", None):
            dummy, format_ = orig_value.contentType.split("/", 1)
            return orig_value, format_, (orig_value._width, orig_value._height)
        orig_data = None
        try:
            orig_data = orig_value.open()
        except AttributeError:
            orig_data = getattr(aq_base(orig_value), "data", orig_value)
        if not orig_data:
            return
        # Handle cases where large image data is stored in FileChunks instead
        # of plain string
        if isinstance(orig_data, tuple(FILECHUNK_CLASSES)):
            # Convert data to 8-bit string
            # (FileChunk does not provide read() access)
            orig_data = str(orig_data)

        # If quality wasn't in the parameters, try the site's default scaling
        # quality if it exists.
        if "quality" not in parameters:
            quality = self.get_quality()
            if quality:
                parameters["quality"] = quality

        if not getattr(orig_value, "contentType", "") == "image/svg+xml":
            try:
                result = self.create_scale(
                    orig_data,
                    direction=direction,
                    height=height,
                    width=width,
                    **parameters
                )
            except (ConflictError, KeyboardInterrupt):
                raise
            except Exception:
                logger.exception(
                    'Could not scale "{0!r}" of {1!r}'.format(
                        orig_value, self.context.absolute_url(),
                    ),
                )
                return
            if result is None:
                return
        else:
            if isinstance(orig_data, (six.text_type)):
                orig_data = safe_encode(orig_data)
            if isinstance(orig_data, (bytes)):
                orig_data = BytesIO(orig_data)

            result = orig_data.read(), "svg+xml", (width, height)

        data, format_, dimensions = result
        mimetype = "image/{0}".format(format_.lower())
        value = orig_value.__class__(
            data, contentType=mimetype, filename=orig_value.filename,
        )
        value.fieldname = fieldname

        # make sure the file is closed to avoid error:
        # ZODB-5.5.1-py3.7.egg/ZODB/blob.py:339: ResourceWarning:
        # unclosed file <_io.FileIO ... mode='rb' closefd=True>
        if isinstance(orig_data, BlobFile):
            orig_data.close()

        return value, format_, dimensions
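The SVG branch above leans on a small normalization idiom: text becomes bytes, bytes become a file-like object with a read() method. As a standalone sketch (the helper name is made up, and a plain encode stands in for safe_encode):

from io import BytesIO

def as_readable(data):
    if isinstance(data, str):
        data = data.encode('utf-8')  # stand-in for safe_encode
    if isinstance(data, bytes):
        data = BytesIO(data)
    return data  # now exposes .read()

svg = as_readable(u'<svg xmlns="http://www.w3.org/2000/svg"/>')
print(svg.read())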
Example #40
0
def retreive_compound_sdf_chembl(molregno,getmol_url='/chembl/download_helper/getmol/',outputfile=None):
    DOMAIN_URL = 'https://www.ebi.ac.uk'
    errdata = dict()
    data = None
    do_not_skip_on_debug = False
    SIZE_LIMIT = 52428800
    RECEIVE_TIMEOUT = 120
    args = []
    try:
        
        response = requests.get(DOMAIN_URL+getmol_url+str(molregno),timeout=30,stream=True,verify=True)
        response.raise_for_status()
        if outputfile:
            fileh = open(outputfile,'w+b')
        else:
            fileh = BytesIO(b'')
        size = 0
        start = time.time()
        chunks = response.iter_content(chunk_size=524288)
        for chunk in chunks:
            size += len(chunk)
            if size > SIZE_LIMIT:
                raise StreamSizeLimitError('response too large')
            if time.time() - start > RECEIVE_TIMEOUT:
                raise StreamTimeoutError('timeout reached')
            fileh.write(chunk)

        response.close()
        # Rewind before reading back; the write loop left the file position
        # at the end of the data, and the buffer must stay open for the
        # molecule parse below (the finally block closes it).
        fileh.seek(0)
        if not outputfile:
            data = fileh.read()
            fileh.seek(0)
        mol = open_molecule_file(fileh, filetype='sdf')
        del mol

        return (data, errdata)
    except HTTPError:
      errdata['Error'] = True
      errdata['ErrorType'] = 'HTTPError'
      errdata['status_code'] = response.status_code
      errdata['reason'] = response.reason
    except ConnectionError as e:
      errdata['Error'] = True
      errdata['ErrorType'] = 'ConnectionError'
      errdata['reason'] = 'Cannot connect.'
    except Timeout as e:
      errdata['Error'] = True
      errdata['ErrorType'] = 'Timeout'
      errdata['reason'] = 'Timeout exceeded.'
    except TooManyRedirects as e:
      errdata['Error'] = True
      errdata['ErrorType'] = 'TooManyRedirects'
      errdata['reason'] = 'Too many redirects.'
    except StreamSizeLimitError as e:
      errdata['Error'] = True
      errdata['ErrorType'] = 'StreamSizeLimitError'
      errdata['reason'] = str(e)
    except StreamTimeoutError as e:
      errdata['Error'] = True
      errdata['ErrorType'] = 'StreamTimeoutError'
      errdata['reason'] = str(e)
    except ParsingError as e:
      errdata['Error'] = True
      errdata['ErrorType'] = 'ParsingError'
      errdata['reason'] = str(e)
    except:
      errdata['Error'] = True
      errdata['ErrorType'] = 'Internal'
      errdata['reason'] = ''
      do_not_skip_on_debug = True
      raise
    finally:
      try:
        response.close()
      except:
        pass
      try:
        fileh.close()
      except:
        pass
      if not (settings.DEBUG and do_not_skip_on_debug):
        return(data,errdata)
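A sketch of calling the downloader above (the molregno value is illustrative; network access and the helper modules it imports are assumed to be available):

data, errdata = retreive_compound_sdf_chembl(12345)
if errdata.get('Error'):
    print(errdata['ErrorType'], errdata['reason'])
else:
    print('received %d bytes of SDF data' % len(data))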
Example #41
0
    def get_timeseries(self,
                       starttime,
                       endtime,
                       observatory=None,
                       channels=None,
                       type=None,
                       interval=None):
        """Get timeseries data

        Parameters
        ----------
        starttime: obspy.core.UTCDateTime
            time of first sample.
        endtime: obspy.core.UTCDateTime
            time of last sample.
        observatory: str
            observatory code.
        channels: array_like
            list of channels to load
        type: {'variation', 'quasi-definitive', 'definitive'}
            data type.
        interval: {'daily', 'hourly', 'minute', 'second'}
            data interval.

        Returns
        -------
        obspy.core.Stream
            timeseries object with requested data.

        Raises
        ------
        TimeseriesFactoryException
            if invalid values are requested, or errors occur while
            retrieving timeseries.
        """
        observatory = observatory or self.observatory
        channels = channels or self.channels
        type = type or self.type
        interval = interval or self.interval

        if starttime > endtime:
            raise TimeseriesFactoryException(
                'Starttime before endtime "%s" "%s"' % (starttime, endtime))

        # need this until https://github.com/obspy/obspy/pull/1179
        # replace stdout
        original_stdout = sys.stdout
        temp_stdout = BytesIO()
        try:
            sys.stdout = temp_stdout
            # get the timeseries
            timeseries = obspy.core.Stream()
            for channel in channels:
                data = self._get_timeseries(starttime, endtime, observatory,
                                            channel, type, interval)
                timeseries += data
        # restore stdout
        finally:
            output = temp_stdout.getvalue()
            if output:
                sys.stderr.write(str(output))
            temp_stdout.close()
            sys.stdout = original_stdout
        self._post_process(timeseries, starttime, endtime, channels)

        return timeseries
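Note that swapping sys.stdout for a BytesIO, as above, only works under Python 2, where print writes byte strings; under Python 3, sys.stdout must be a text stream. A Python 3 sketch of the same capture idiom:

import sys
from io import StringIO

original_stdout = sys.stdout
sys.stdout = StringIO()
try:
    print('noisy library output')  # stands in for the chatty obspy calls
finally:
    captured = sys.stdout.getvalue()
    sys.stdout = original_stdout
if captured:
    sys.stderr.write(captured)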
Example #42
0
class WebPImageFile(ImageFile.ImageFile):

    format = "WEBP"
    format_description = "WebP image"
    __loaded = 0
    __logical_frame = 0

    def _open(self):
        if not _webp.HAVE_WEBPANIM:
            # Legacy mode
            data, width, height, self.mode, icc_profile, exif = _webp.WebPDecode(
                self.fp.read())
            if icc_profile:
                self.info["icc_profile"] = icc_profile
            if exif:
                self.info["exif"] = exif
            self._size = width, height
            self.fp = BytesIO(data)
            self.tile = [("raw", (0, 0) + self.size, 0, self.mode)]
            self.n_frames = 1
            self.is_animated = False
            return

        # Use the newer AnimDecoder API to parse the (possibly) animated file,
        # and access muxed chunks like ICC/EXIF/XMP.
        self._decoder = _webp.WebPAnimDecoder(self.fp.read())

        # Get info from decoder
        width, height, loop_count, bgcolor, frame_count, mode = self._decoder.get_info(
        )
        self._size = width, height
        self.info["loop"] = loop_count
        bg_a, bg_r, bg_g, bg_b = (
            (bgcolor >> 24) & 0xFF,
            (bgcolor >> 16) & 0xFF,
            (bgcolor >> 8) & 0xFF,
            bgcolor & 0xFF,
        )
        self.info["background"] = (bg_r, bg_g, bg_b, bg_a)
        self.n_frames = frame_count
        self.is_animated = self.n_frames > 1
        self.mode = "RGB" if mode == "RGBX" else mode
        self.rawmode = mode
        self.tile = []

        # Attempt to read ICC / EXIF / XMP chunks from file
        icc_profile = self._decoder.get_chunk("ICCP")
        exif = self._decoder.get_chunk("EXIF")
        xmp = self._decoder.get_chunk("XMP ")
        if icc_profile:
            self.info["icc_profile"] = icc_profile
        if exif:
            self.info["exif"] = exif
        if xmp:
            self.info["xmp"] = xmp

        # Initialize seek state
        self._reset(reset=False)

    def _getexif(self):
        if "exif" not in self.info:
            return None
        return dict(self.getexif())

    def seek(self, frame):
        if not self._seek_check(frame):
            return

        # Set logical frame to requested position
        self.__logical_frame = frame

    def _reset(self, reset=True):
        if reset:
            self._decoder.reset()
        self.__physical_frame = 0
        self.__loaded = -1
        self.__timestamp = 0

    def _get_next(self):
        # Get next frame
        ret = self._decoder.get_next()
        self.__physical_frame += 1

        # Check if an error occurred
        if ret is None:
            self._reset()  # Reset just to be safe
            self.seek(0)
            raise EOFError("failed to decode next frame in WebP file")

        # Compute duration
        data, timestamp = ret
        duration = timestamp - self.__timestamp
        self.__timestamp = timestamp

        # libwebp gives frame end, adjust to start of frame
        timestamp -= duration
        return data, timestamp, duration

    def _seek(self, frame):
        if self.__physical_frame == frame:
            return  # Nothing to do
        if frame < self.__physical_frame:
            self._reset()  # Rewind to beginning
        while self.__physical_frame < frame:
            self._get_next()  # Advance to the requested frame

    def load(self):
        if _webp.HAVE_WEBPANIM:
            if self.__loaded != self.__logical_frame:
                self._seek(self.__logical_frame)

                # We need to load the image area_data for this frame
                data, timestamp, duration = self._get_next()
                self.info["timestamp"] = timestamp
                self.info["duration"] = duration
                self.__loaded = self.__logical_frame

                # Set tile
                if self.fp and self._exclusive_fp:
                    self.fp.close()
                self.fp = BytesIO(data)
                self.tile = [("raw", (0, 0) + self.size, 0, self.rawmode)]

        return super().load()

    def tell(self):
        if not _webp.HAVE_WEBPANIM:
            return super().tell()

        return self.__logical_frame
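A worked example of the ARGB unpacking used in _open above (the packed value is made up):

bgcolor = 0x80FF8040  # hypothetical packed value: A=0x80, R=0xFF, G=0x80, B=0x40
bg_a = (bgcolor >> 24) & 0xFF
bg_r = (bgcolor >> 16) & 0xFF
bg_g = (bgcolor >> 8) & 0xFF
bg_b = bgcolor & 0xFF
assert (bg_r, bg_g, bg_b, bg_a) == (0xFF, 0x80, 0x40, 0x80)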
Example #43
0
def generar_croq(ubigeo, aeut):
    print("croquis")
    MARGIN_SIZE = 17 * mm
    PAGE_SIZE = A4
    response = HttpResponse(content_type='application/pdf')
    response[
        'Content-Disposition'] = "attachment; filename=" + ubigeo + "001" + str(
            aeut.aeu_final) + ".pdf"
    pdf_name = "clientes.pdf"
    styles = getSampleStyleSheet()
    stylesTitle = getSampleStyleSheet()
    stylesCabe = getSampleStyleSheet()

    styleTitle = stylesTitle["Normal"]
    styleTitle.alignment = TA_CENTER

    styleBH = styles["Normal"]
    styleBH.alignment = TA_LEFT

    styleCa = stylesCabe["Normal"]
    styleCa.alignment = TA_CENTER

    buff = BytesIO()
    doc = SimpleDocTemplate(
        buff,
        pagesize=A4,
        rightMargin=70,
        leftMargin=70,
        topMargin=60,
        bottomMargin=18,
    )
    h1 = PS(name='Heading1', fontSize=9, leading=16)
    h2 = PS(name='Normal', fontSize=7, leading=16)

    h3 = PS(name='Normal', fontSize=6, leading=16, alignment=TA_CENTER)

    h4 = PS(name='Normal', fontSize=6, leading=16)

    story = []

    distrito = Distrito.objects.get(ubigeo=ubigeo)  # ubigeo

    # if aeut!=None and aeut!='':
    cond = Aeus.objects.filter(ubigeo=distrito.ubigeo, zona='00100')
    # elif ubigeo!=None and ubigeo!='':
    #     cond = Aeus.objects.filter(ubigeo=distrito.ubigeo, zona = '00100', aeu_final = aeut)
    #
    # if aeut!=None and aeut!='':
    total = str(
        Aeus.objects.filter(ubigeo=distrito.ubigeo, zona='00100').count())
    # elif ubigeo!= None and ubigeo!='':
    #     total = str(Aeus.objects.filter(ubigeo=distrito.ubigeo, zona = '00100', aeu_final = aeut).count())
    x = 0
    rango_equivalencia = [[1, 'A'], [2, 'B'], [3, 'C'], [4, 'D'], [5, 'E'],
                          [6, 'F'], [7, 'G'], [8, 'H'], [9, 'I'], [10, 'J'],
                          [11, 'K'], [12, 'L'], [13, 'M'], [14, 'N'],
                          [15, 'O'], [16, 'P'], [17, 'Q']]

    for aeu in cond:
        x = x + 1
        y = x

        # Zero-pad seccion and aeu_final to three digits.
        secc = Paragraph(str(aeu.seccion).zfill(3), h1)
        aeus = Paragraph(str(aeu.aeu_final).zfill(3), h1)

        zona_temp = aeu.zona[0:3]
        zona_int = int(aeu.zona[3:])
        zona_int_eq = ""
        for el in rango_equivalencia:
            if (el[0] == zona_int):
                zona_int_eq = el[1]

        zona_temp = zona_temp + str(zona_int_eq)

        data_croq = [
            ['', '', '', '', '',
             Paragraph('<strong>Doc. CPV</strong>', h4)],
            [
                Paragraph('<strong>A. UBICACION GEOGRAFICA</strong>', h1), '',
                '', '',
                Paragraph('<strong>B. UBICACION CENSAL</strong>', h1), ''
            ],
            [
                Paragraph('<strong>DEPARTAMENTO</strong>', h1),
                Paragraph(str(distrito.ccdd.ccdd), h1),
                Paragraph(str(distrito.ccdd.departamento), h1), '',
                Paragraph('<strong>ZONA Nº</strong>', h1),
                Paragraph(zona_temp, h1)
            ],
            [
                Paragraph('<strong>PROVINCIA</strong>', h1),
                Paragraph(distrito.ccpp, h1),
                Paragraph(distrito.cod_prov.provincia, h1), '',
                Paragraph(str('<strong>SECCION Nº</strong>'), h1), secc
            ],
            [
                Paragraph('<strong>DISTRITO</strong>', h1),
                Paragraph(distrito.ccdi, h1),
                Paragraph(distrito.distrito, h1), '',
                Paragraph('<strong>A.E.U. Nº</strong>', h1), aeus
            ],
            [
                Paragraph('<strong>CENTRO POBLADO</strong>', h1),
                Paragraph(aeu.llave_ccpp.nomccpp, h1), '', '', '', ''
            ],
            [
                Paragraph('<strong>CATEGORIA CENTRO POBLADO</strong>', h1),
                Paragraph('CIUDAD', h1), '', '',
                Paragraph('<strong>C. TOTAL DE VIVIENDAS DEL A.E.U.</strong>',
                          h1),
                Paragraph(str(int(aeu.sum_viv_ae)), h1)
            ],
        ]

        tables_croq = Table(
            data_croq,
            colWidths=[6 * cm, 1 * cm, 3.5 * cm, 0.1 * cm, 6 * cm, 1.5 * cm])
        #t_aeu = Table(all_aeu, colWidths=[6 * cm, 1 * cm, 3.5 * cm, 0.1 * cm, 6 * cm, 1.5 * cm])

        tables_croq.setStyle(
            TableStyle([('TEXTCOLOR', (0, 0), (5, 0), colors.black),
                        ('ALIGN', (4, 0), (5, 0), 'RIGHT'),
                        ('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
                        ('GRID', (0, 1), (2, 6), 1, colors.black),
                        ('GRID', (4, 1), (5, 4), 1, colors.black),
                        ('GRID', (-2, -1), (-1, -1), 1, colors.black),
                        ('SPAN', (0, 1), (2, 1)), ('SPAN', (4, 1), (5, 1)),
                        ('SPAN', (1, 5), (2, 5)), ('SPAN', (1, 6), (2, 6)),
                        ('BACKGROUND', (4, 1), (5, 5), colors.white),
                        ('BACKGROUND', (0, 1), (-1, 1), colors.lightskyblue),
                        ('BACKGROUND', (0, 1), (0, 6), colors.lightskyblue),
                        ('BACKGROUND', (4, 1), (4, 4), colors.lightskyblue),
                        ('BACKGROUND', (4, 6), (4, 6), colors.lightskyblue)]))
        t1_c = Paragraph(
            "<strong>INSTITUTO NACIONAL DE ESTADISTICA E INFORMATICA</strong>",
            styleTitle)
        t2_c = Paragraph(
            "<strong>CENSOS NACIONALES 2017: XII DE POBLACION, VII DE VIVIENDA</strong>",
            styleTitle)
        # story.append(Spacer(0, 001 * mm))
        t3_c = Paragraph("<strong>Y III DE COMUNIDADES INDIGENAS</strong>",
                         styleTitle)
        fichero_imagen_inei = 'Reporte/Img/inei.png'
        imagen_logo_inei = Image(os.path.realpath(fichero_imagen_inei),
                                 width=50,
                                 height=50)

        P2 = Paragraph('', styleBH)
        fichero_imagen = 'Reporte/Img/escudo.png'
        imagen_logo = Image(os.path.realpath(fichero_imagen),
                            width=50,
                            height=50)

        t = Table(data=[['', t1_c, ''],
                        [[imagen_logo, P2], t2_c, [imagen_logo_inei, P2]],
                        ['', t3_c, '']],
                  colWidths=[2 * cm, 13 * cm, 2 * cm],
                  style=[
                      ('GRID', (1, 1), (-2, -2), 1, colors.white),
                      ('GRID', (0, 0), (-1, -1), 0.5, colors.white),
                  ])
        story.append(t)
        story.append(Spacer(0, 3 * mm))
        story.append(
            Paragraph(
                "<strong>CROQUIS DEL ÁREA DE EMPADRONAMIENTO URBANO</strong>",
                styleTitle))
        story.append(Spacer(0, 1 * mm))
        story.append(tables_croq)
        story.append(Spacer(0, 2 * mm))

        viv_urb = ViviendaUrbana.objects.filter(
            Q(ubigeo=distrito.ubigeo), Q(zona=aeu.zona),
            Q(aeu_final=aeu.aeu_final)).order_by('or_viv_aeu')

        P2 = Paragraph('', styleBH)
        fichero_imagen = 'Reporte/Croquis/Zona' + ubigeo + '00100' + '/Imagen' + ubigeo + '00100' + str(
            aeut.aeu_final) + '.jpg'
        imagen_croquis = Image(os.path.realpath(fichero_imagen),
                               width=16 * cm,
                               height=14.5 * cm)

        data_img = [
            [Paragraph(e, h3) for e in ["<strong>Imagen de Croquis</strong>"]],
        ]
        cr = Table(data=[[imagen_croquis]],
                   colWidths=[18 * cm],
                   style=[
                       ('GRID', (1, 1), (-2, -2), 1, colors.black),
                       ('GRID', (0, 0), (-1, -1), 0.5, colors.black),
                   ])

        story.append(cr)

        Z1_croq = Paragraph(
            "<strong>OBSERVACIONES: ...................................................................."
            "..........................................................................................."
            "..........................................................................................."
            "..........................................................................................."
            "..........................................................................................."
            "..........................................................................................."
            "..........................................................................................."
            "..........................................................................................."
            ".........................................</strong>", h2)

        table_obs_croq = Table(data=[[Z1_croq]],
                               colWidths=[18.3 * cm],
                               style=[('GRID', (0, 0), (-1, -1), 1,
                                       colors.black)])

        p_croq = Paragraph(str(y) + " - " + total, h2)
        extra = Paragraph("-", h2)

        p_page = Table(data=[[extra, p_croq]],
                       colWidths=[17 * cm, 2.3 * cm],
                       style=[
                           ('GRID', (0, 0), (-1, -1), 1, colors.white),
                           ('ALIGN', (0, 0), (1, 0), 'RIGHT'),
                       ])
        story.append(Spacer(0, 1 * mm))
        story.append(Spacer(0, 2 * mm))
        story.append(table_obs_croq)
        story.append(Spacer(0, 2 * mm))
        story.append(p_page)

        story.append(PageBreak())

    doc.build(story)
    obj = buff.getvalue()
    buff.close()
    return obj
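Unlike the Django views in the earlier examples, generar_croq returns the raw PDF bytes and never uses the HttpResponse it builds near the top; a caller has to wrap the bytes itself. A hypothetical sketch of such a wrapper:

from django.http import HttpResponse

def croquis_view(request, ubigeo, aeut):  # hypothetical wrapper view
    pdf_bytes = generar_croq(ubigeo, aeut)
    return HttpResponse(pdf_bytes, content_type='application/pdf')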
Example #44
0
def print_pdf_EARLY_SETTLEMENT(request, agreement_id):

    pdf_buffer = BytesIO()
    my_doc = SimpleDocTemplate(pdf_buffer,
                               rightMargin=50,
                               leftMargin=50,
                               topMargin=20,
                               bottomMargin=50
                               )
    flowables = []

    sample_style_sheet = getSampleStyleSheet()
    sample_style_sheet.list()

    agreement_detail = go_agreement_querydetail.objects.get(agreementnumber=agreement_id)
    agreement_customer = go_customers.objects.get(customernumber=agreement_detail.agreementcustomernumber)
    account_summary = go_account_transaction_summary.objects.filter(agreementnumber=agreement_id)
    if agreement_detail.agreementdefname != 'Hire Purchase' and agreement_detail.agreementdefname != 'Management Fee':
        agreement_type = 'Lease'
        sales_tax_rate = 1.2
    else:
        agreement_type = 'HP'
        sales_tax_rate = 1.0

    settlement_figure_queryset = account_summary.aggregate(Sum('transnetpayment'))
    settlement_figure = settlement_figure_queryset['transnetpayment__sum']
    settlement_figure_queryset_gross = account_summary.aggregate(Sum('transgrosspayment'))
    settlement_figure_gross = settlement_figure_queryset_gross['transgrosspayment__sum']
    # sales_tax_rate already reflects the agreement type chosen above, so a
    # single expression covers both cases.
    settlement_figure_vat = settlement_figure * decimal.Decimal(sales_tax_rate)

    next_rental_date = go_account_transaction_summary.objects.filter(agreementnumber=agreement_id,
                                                                            transactionsourceid__in=['SP1', 'SP2',
                                                                                                     'SP3', 'GO1', 'GO3'],
                                                                            transtypeid__isnull=False,
                                                                            transactiondate__gt=datetime.date.today()).first()
    next_date = next_rental_date.transactiondate.strftime("%d/%m/%Y")

    paragraph_33 = Paragraph(
        "<u> Early Termination Figure </u>",
        sample_style_sheet['Heading1']
    )
    arrears_total_collected = request.GET.get('arrears_total_collected')

    a = Paragraph('''<u>Hire Agreement Number:</u>''', sample_style_sheet['BodyText'])
    b = Paragraph('''<u>Hire Agreement Name:</u>''', sample_style_sheet['BodyText'])
    c = Paragraph('''<u>Goods:</u>''', sample_style_sheet['BodyText'])
    d = Paragraph('''Terminal Settlement Figure:''', sample_style_sheet['Heading4'])
    e = Paragraph("£" + str(format(arrears_total_collected)), sample_style_sheet['Heading4'])
    table3 = [a, agreement_id], \
             [b, agreement_customer.customercompany], \
             [c, "As per schedule NCF01"], \
             [d, e]

    paragraph_4 = Paragraph(
        "In response to your request for a termination figure for agreement " + agreement_id + " we have pleasure in providing the following information. For security purposes the termination details are provided by email and post. If you have not requested this, please contact us immediately.",
        sample_style_sheet['BodyText']
    )
    paragraph_5 = Paragraph(

        "Your Account details are protected by the Data Protection Act (DPA), so we can only discuss your account with you. We will not discuss details of your account with any other person unless you first give us your express permission to do so. This is to ensure the details about your business remain secure at all times. "
        ,
        sample_style_sheet['BodyText']
    )
    paragraph_6 = Paragraph(
        "These types of agreements have huge tax benefits to your business and are not interest-only contracts; we are not allowed or permitted to discount over a certain level, however there are no penalties for early termination. This figure has been calculated after taking into account the transactions up to and including today's date and is valid until the date shown below. We are assuming that your bank will not recall any direct debit or standing order, and that any cheques already received by us will be honoured. The Termination Sum which you will have to pay upon early termination of this Agreement will be based upon the remaining total gross rentals shown on the agreement in the Rental payments section, as also shown in clause 9 (b). This termination sum represents damages and not a supply of services, therefore you will not receive a separate VAT invoice, as per clause 9 (d). The total payable below is only valid until the date shown, subject to the agreement being up to date.",
        sample_style_sheet['BodyText']
    )
    paragraph_7 = Paragraph(
        "<b>Important - Your Personal Information</b> - We may use your personal information for a variety of purposes; further details about this and your other rights can be found in our Fair Processing Notice at: www.bluerockfinance.co.uk/fair-processing-notice/. We consider that such processing is necessary for our legitimate interests in considering applications and in operating Agreements and our business, and it is a requirement of entering into an Agreement. You have a right to object to our processing your information on grounds relating to your particular situation.",
        sample_style_sheet['BodyText']
    )
    paragraph_8 = Paragraph(
        "If you decide to terminate the agreement early, the figures below apply the maximum discount possible, including the notice period:",
        sample_style_sheet['BodyText']
    )

    f = Paragraph("Total Payable for Settlement:", sample_style_sheet['Heading4'])
    h = Paragraph("Valid Until:", sample_style_sheet['Heading4'])
    i = Paragraph(next_date, sample_style_sheet['Heading4'])

    table5 = [f, e, h, i],

    j = Paragraph("Bank Name:", sample_style_sheet['BodyText'])
    k = Paragraph("Coutts & Co", sample_style_sheet['BodyText'])
    l = Paragraph("Account No & Sort Code:", sample_style_sheet['BodyText'])
    m = Paragraph("0576 9981   18 - 00 - 02", sample_style_sheet['BodyText'])

    n = Paragraph("Account Name:", sample_style_sheet['BodyText'])
    o = Paragraph("Bluerock Secured Finance", sample_style_sheet['BodyText'])
    p = Paragraph("Reference:", sample_style_sheet['BodyText'])
    q = Paragraph(agreement_id, sample_style_sheet['BodyText'])

    table4 = [j, k, l, m], \
             [n, o, p, q]

    paragraph_11 = Paragraph(
        "We offer a new business discount for further finance taken out prior to the valid until date shown above. If you would like to discuss the end of hire options & requirements, then please contact your broker.",
        sample_style_sheet['BodyText']
    )
    paragraph_12 = Paragraph(
        "We would like to take this opportunity to thank you for using Bluerock Secured Finance Ltd and wish you "
        "and your business every success in the future.",
        sample_style_sheet['BodyText']
    )
    paragraph_125 = Paragraph(
        "",
        sample_style_sheet['BodyText']
    )

    paragraph_13 = Paragraph(
        "Yours faithfully,",
        sample_style_sheet['BodyText']
    )
    paragraph_14 = Paragraph(
        "Alan Richards",
        sample_style_sheet['BodyText']
    )
    paragraph_15 = Paragraph(
        "Customer Services",
        sample_style_sheet['BodyText']
    )
    paragraph_16 = Paragraph(
        "VAT Reg No. 974 594073 | Authorised & Regulated by the Financial Conduct Authority Firm Ref No: 729205 | Company Reg No. 06944649.",
        sample_style_sheet['Heading6']
    )

    im = Image("static/assets/images/others/bluerock-logo.jpg", width=3.4 * inch, height=0.8 * inch)
    im.hAlign = 'RIGHT'

    if agreement_customer.customeraddress1:
        address1 = Paragraph(agreement_customer.customeraddress1, sample_style_sheet['BodyText'])
    else:
        address1 = ''
    if agreement_customer.customeraddress2:
        address2 = Paragraph(agreement_customer.customeraddress2, sample_style_sheet['BodyText'])
    else:
        address2 = ''
    if agreement_customer.customeraddress3:
        address3 = Paragraph(agreement_customer.customeraddress3, sample_style_sheet['BodyText'])
    else:
        address3 = ''
    if agreement_customer.customeraddress4:
        address4 = Paragraph(agreement_customer.customeraddress4, sample_style_sheet['BodyText'])
    else:
        address4 = ''
    if agreement_customer.customeraddress5:
        address5 = Paragraph(agreement_customer.customeraddress5, sample_style_sheet['BodyText'])
    else:
        address5 = ''
    if agreement_customer.customerpostcode:
        postcode = Paragraph(agreement_customer.customerpostcode, sample_style_sheet['BodyText'])
    else:
        postcode = ''
    array = [agreement_customer.customercompany, address1, address2, address3, address4, address5, postcode]
    while '' in array:
        array.remove('')
    array.append('')
    array.append('')
    array.append('')
    array.append('')

    data2 = [['', ''],
             [array[0], ''],
             [array[1], ''],
             [array[2], ''],
             [array[3], ''],
             [array[4], ''],
             [array[5], im],
             ]

    t2 = Table(data2, colWidths=247, rowHeights=15)
    t3 = Table(table3, colWidths=247, rowHeights=15, style=[])
    t5 = Table(table5, colWidths=99, rowHeights=18, style=[])
    t4 = Table(table4, colWidths=120, rowHeights=15, style=[])

    t4._argW[0] = 1.2 * inch
    t4._argW[1] = 2 * inch
    t4._argW[2] = 1.9 * inch
    t4._argW[3] = 1.8 * inch

    t5._argW[0] = 2.4 * inch
    t5._argW[1] = 1.5 * inch
    t5._argW[2] = 1.5 * inch
    t5._argW[3] = 1.5 * inch

    flowables.append(t2)
    flowables.append(paragraph_33)
    flowables.append(t3)
    flowables.append(paragraph_4)
    flowables.append(paragraph_5)
    flowables.append(paragraph_6)
    flowables.append(paragraph_7)
    flowables.append(paragraph_8)
    flowables.append(t5)
    flowables.append(t4)
    flowables.append(paragraph_11)
    flowables.append(paragraph_12)
    flowables.append(paragraph_13)
    flowables.append(paragraph_125)
    flowables.append(paragraph_125)
    flowables.append(paragraph_125)
    flowables.append(paragraph_14)
    flowables.append(paragraph_15)
    flowables.append(paragraph_16)

    my_doc.build(flowables)

    pdf_EARLY_SETTLEMENT_value = pdf_buffer.getvalue()
    pdf_buffer.close()
    response = HttpResponse(content_type='application/pdf')

    filename = 'Apellio ' + agreement_id + " Early Settlement Figure"

    response['Content-Disposition'] = "attachment; filename=%s.pdf" % filename

    response.write(pdf_EARLY_SETTLEMENT_value)
    return response
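One subtlety in the settlement arithmetic above: decimal.Decimal(1.2) is built from a float and inherits its binary rounding error, whereas constructing from a string keeps the value exact. A quick comparison:

import decimal

print(decimal.Decimal(1.2))    # 1.1999999999999999555910790149937383830547332763671875
print(decimal.Decimal('1.2'))  # 1.2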
Example #45
0
    def handle(self, *args, **options):
        from io import BytesIO
        import mimetypes
        import boto
        from boto.s3.key import Key

        if hasattr(settings, 'USE_S3_STORAGE') and settings.USE_S3_STORAGE:
            bucket_name = settings.AWS_STORAGE_BUCKET_NAME
            bucket_site_folder_name = settings.AWS_LOCATION
            conn = boto.connect_s3(settings.AWS_ACCESS_KEY_ID,
                                   settings.AWS_SECRET_ACCESS_KEY)
            bucket = conn.get_bucket(bucket_name)
            k = Key(bucket)

            static_root = settings.STATIC_ROOT
            static_url_to_find = settings.LOCAL_STATIC_URL
            static_url_replace_with = settings.STATIC_URL

            if os.path.isdir(static_root):
                # walk through the directory
                for dirpath, dirnames, filenames in os.walk(static_root):
                    for filename in filenames:
                        # skip the jquery and websymbols.css
                        if filename.find(
                                'jquery'
                        ) == -1 and filename != 'websymbols.css':
                            if os.path.splitext(filename)[1] in [
                                    '.js', '.css', '.less'
                            ]:
                                file_path = (os.path.join(dirpath,
                                                          filename)).replace(
                                                              '\\', '/')

                                with open(file_path) as f:
                                    content = f.read()
                                    if content.find(static_url_to_find) != -1:
                                        new_content = content.replace(
                                            static_url_to_find,
                                            static_url_replace_with)
                                        # upload to s3
                                        key = '%s/%s/%s' % (
                                            bucket_site_folder_name,
                                            dirpath.replace(
                                                static_root,
                                                'static'), filename)

                                        k.key = key

                                        content_type = mimetypes.guess_type(
                                            filename
                                        )[0] or k.DefaultContentType
                                        k.set_metadata('Content-Type',
                                                       content_type)
                                        myfile = BytesIO(new_content)
                                        k.set_contents_from_file(myfile,
                                                                 replace=True)
                                        myfile.close()
                                        #k.set_contents_from_string(new_content, replace=True)
                                        k.set_acl('public-read')
                                        print(file_path)

        else:
            print('Site is not using S3 Storage.')
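A caveat for Python 3 readers: the files above are opened in text mode, so new_content is a str, and BytesIO(new_content) would raise TypeError because the buffer needs bytes (the original targets Python 2 / boto). A sketch of a Python 3-safe construction, assuming UTF-8 sources:

from io import BytesIO

new_content = '/* rewritten css */'            # stands in for the replaced text
myfile = BytesIO(new_content.encode('utf-8'))  # encode explicitly for Python 3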
Example #46
0
    def GenerateGuidXRefFile(BuildDb, ArchList, FdfParserObj):
        GuidXRefFileName = os.path.join(GenFdsGlobalVariable.FvDir,
                                        "Guid.xref")
        GuidXRefFile = BytesIO('')
        PkgGuidDict = {}
        GuidDict = {}
        ModuleList = []
        FileGuidList = []
        for Arch in ArchList:
            PlatformDataBase = BuildDb.BuildObject[
                GenFdsGlobalVariable.ActivePlatform, Arch,
                GenFdsGlobalVariable.TargetName,
                GenFdsGlobalVariable.ToolChainTag]
            PkgList = GenFdsGlobalVariable.WorkSpace.GetPackageList(
                GenFdsGlobalVariable.ActivePlatform, Arch,
                GenFdsGlobalVariable.TargetName,
                GenFdsGlobalVariable.ToolChainTag)
            for P in PkgList:
                PkgGuidDict.update(P.Guids)
            for Name, Guid in PlatformDataBase.Pcds:
                Pcd = PlatformDataBase.Pcds[Name, Guid]
                if Pcd.Type in [TAB_PCDS_DYNAMIC_HII, TAB_PCDS_DYNAMIC_EX_HII]:
                    for SkuId in Pcd.SkuInfoList:
                        Sku = Pcd.SkuInfoList[SkuId]
                        if Sku.VariableGuid and Sku.VariableGuid in PkgGuidDict.keys(
                        ):
                            GuidDict[Sku.VariableGuid] = PkgGuidDict[
                                Sku.VariableGuid]
            for ModuleFile in PlatformDataBase.Modules:
                Module = BuildDb.BuildObject[ModuleFile, Arch,
                                             GenFdsGlobalVariable.TargetName,
                                             GenFdsGlobalVariable.ToolChainTag]
                if Module in ModuleList:
                    continue
                else:
                    ModuleList.append(Module)
                if GlobalData.gGuidPattern.match(ModuleFile.BaseName):
                    GuidXRefFile.write("%s %s\n" %
                                       (ModuleFile.BaseName, Module.BaseName))
                else:
                    GuidXRefFile.write("%s %s\n" %
                                       (Module.Guid, Module.BaseName))
                GuidDict.update(Module.Protocols)
                GuidDict.update(Module.Guids)
                GuidDict.update(Module.Ppis)
            for FvName in FdfParserObj.Profile.FvDict:
                for FfsObj in FdfParserObj.Profile.FvDict[FvName].FfsList:
                    if not isinstance(FfsObj, FileStatement):
                        InfPath = PathClass(
                            NormPath(
                                mws.join(GenFdsGlobalVariable.WorkSpaceDir,
                                         FfsObj.InfFileName)))
                        FdfModule = BuildDb.BuildObject[
                            InfPath, Arch, GenFdsGlobalVariable.TargetName,
                            GenFdsGlobalVariable.ToolChainTag]
                        if FdfModule in ModuleList:
                            continue
                        else:
                            ModuleList.append(FdfModule)
                        GuidXRefFile.write(
                            "%s %s\n" % (FdfModule.Guid, FdfModule.BaseName))
                        GuidDict.update(FdfModule.Protocols)
                        GuidDict.update(FdfModule.Guids)
                        GuidDict.update(FdfModule.Ppis)
                    else:
                        FileStatementGuid = FfsObj.NameGuid
                        if FileStatementGuid in FileGuidList:
                            continue
                        else:
                            FileGuidList.append(FileStatementGuid)
                        Name = []
                        FfsPath = os.path.join(GenFdsGlobalVariable.FvDir,
                                               'Ffs')
                        FfsPath = glob(
                            os.path.join(FfsPath, FileStatementGuid) +
                            TAB_STAR)
                        if not FfsPath:
                            continue
                        if not os.path.exists(FfsPath[0]):
                            continue
                        MatchDict = {}
                        ReFileEnds = compile(
                            r'\S+(.ui)$|\S+(fv.sec.txt)$|\S+(.pe32.txt)$|\S+(.te.txt)$|\S+(.pic.txt)$|\S+(.raw.txt)$|\S+(.ffs.txt)$'
                        )
                        FileList = os.listdir(FfsPath[0])
                        for File in FileList:
                            Match = ReFileEnds.search(File)
                            if Match:
                                for Index in range(1, 8):
                                    if Match.group(Index) and Match.group(
                                            Index) in MatchDict:
                                        MatchDict[Match.group(Index)].append(
                                            File)
                                    elif Match.group(Index):
                                        MatchDict[Match.group(Index)] = [File]
                        if not MatchDict:
                            continue
                        if '.ui' in MatchDict:
                            for File in MatchDict['.ui']:
                                with open(os.path.join(FfsPath[0], File),
                                          'rb') as F:
                                    F.read()
                                    length = F.tell()
                                    F.seek(4)
                                    TmpStr = unpack('%dh' % ((length - 4) // 2),
                                                    F.read())
                                    Name = ''.join(chr(c) for c in TmpStr[:-1])
                        else:
                            FileList = []
                            if 'fv.sec.txt' in MatchDict:
                                FileList = MatchDict['fv.sec.txt']
                            elif '.pe32.txt' in MatchDict:
                                FileList = MatchDict['.pe32.txt']
                            elif '.te.txt' in MatchDict:
                                FileList = MatchDict['.te.txt']
                            elif '.pic.txt' in MatchDict:
                                FileList = MatchDict['.pic.txt']
                            elif '.raw.txt' in MatchDict:
                                FileList = MatchDict['.raw.txt']
                            elif '.ffs.txt' in MatchDict:
                                FileList = MatchDict['.ffs.txt']
                            else:
                                pass
                            for File in FileList:
                                with open(os.path.join(FfsPath[0], File),
                                          'r') as F:
                                    Name.append((F.read().split()[-1]))
                        if not Name:
                            continue

                        Name = ' '.join(Name) if isinstance(Name, type(
                            [])) else Name
                        GuidXRefFile.write("%s %s\n" %
                                           (FileStatementGuid, Name))

        # Append GUIDs, Protocols, and PPIs to the Xref file
        GuidXRefFile.write("\n")
        for key, item in GuidDict.items():
            GuidXRefFile.write(
                "%s %s\n" %
                (GuidStructureStringToGuidString(item).upper(), key))

        if GuidXRefFile.getvalue():
            SaveFileOnChange(GuidXRefFileName, GuidXRefFile.getvalue(), False)
            GenFdsGlobalVariable.InfLogger(
                "\nGUID cross reference file can be found at %s" %
                GuidXRefFileName)
        elif os.path.exists(GuidXRefFileName):
            os.remove(GuidXRefFileName)
        GuidXRefFile.close()
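Examples #46 and #47 are Python 2 code: BytesIO('') and the subsequent str writes into it fail under Python 3, where accumulating text belongs in a StringIO instead. A minimal Python 3 sketch of the same write-then-getvalue pattern (the values written are illustrative):

from io import StringIO

xref = StringIO()
xref.write("%s %s\n" % ("ffs-guid", "ModuleName"))
content = xref.getvalue()
xref.close()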
Example #47
0
    def AddToBuffer(self,
                    Buffer,
                    BaseAddress,
                    BlockSizeList,
                    ErasePolarity,
                    ImageBinDict,
                    vtfDict=None,
                    MacroDict={},
                    Flag=False):
        Size = self.Size
        if not Flag:
            GenFdsGlobalVariable.InfLogger('\nGenerate Region at Offset 0x%X' %
                                           self.Offset)
            GenFdsGlobalVariable.InfLogger("   Region Size = 0x%X" % Size)
        GenFdsGlobalVariable.SharpCounter = 0
        if Flag and (self.RegionType != BINARY_FILE_TYPE_FV):
            return

        if self.RegionType == BINARY_FILE_TYPE_FV:
            #
            # Get Fv from FvDict
            #
            self.FvAddress = int(BaseAddress, 16) + self.Offset
            FvBaseAddress = '0x%X' % self.FvAddress
            FvOffset = 0
            for RegionData in self.RegionDataList:
                FileName = None
                if RegionData.endswith(".fv"):
                    RegionData = GenFdsGlobalVariable.MacroExtend(
                        RegionData, MacroDict)
                    if not Flag:
                        GenFdsGlobalVariable.InfLogger(
                            '   Region FV File Name = .fv : %s' % RegionData)
                    if RegionData[1] != ':':
                        RegionData = mws.join(
                            GenFdsGlobalVariable.WorkSpaceDir, RegionData)
                    if not os.path.exists(RegionData):
                        EdkLogger.error("GenFds",
                                        FILE_NOT_FOUND,
                                        ExtraData=RegionData)

                    FileName = RegionData
                elif RegionData.upper() + 'fv' in ImageBinDict:
                    if not Flag:
                        GenFdsGlobalVariable.InfLogger('   Region Name = FV')
                    FileName = ImageBinDict[RegionData.upper() + 'fv']
                else:
                    #
                    # Generate FvImage.
                    #
                    FvObj = None
                    FvDict = GenFdsGlobalVariable.FdfParser.Profile.FvDict
                    if RegionData.upper() in FvDict:
                        FvObj = FvDict[RegionData.upper()]

                    if FvObj is not None:
                        if not Flag:
                            GenFdsGlobalVariable.InfLogger(
                                '   Region Name = FV')
                        #
                        # Call GenFv tool
                        #
                        self.BlockInfoOfRegion(BlockSizeList, FvObj)
                        self.FvAddress = self.FvAddress + FvOffset
                        FvAlignValue = GenFdsGlobalVariable.GetAlignment(
                            FvObj.FvAlignment)
                        if self.FvAddress % FvAlignValue != 0:
                            EdkLogger.error(
                                "GenFds", GENFDS_ERROR,
                                "FV (%s) is NOT %s Aligned!" %
                                (FvObj.UiFvName, FvObj.FvAlignment))
                        FvBuffer = BytesIO()
                        FvBaseAddress = '0x%X' % self.FvAddress
                        BlockSize = None
                        BlockNum = None
                        FvObj.AddToBuffer(FvBuffer,
                                          FvBaseAddress,
                                          BlockSize,
                                          BlockNum,
                                          ErasePolarity,
                                          vtfDict,
                                          Flag=Flag)
                        if Flag:
                            continue

                        FvBufferLen = len(FvBuffer.getvalue())
                        if FvBufferLen > Size:
                            FvBuffer.close()
                            EdkLogger.error(
                                "GenFds", GENFDS_ERROR,
                                "Size of FV (%s) is larger than Region Size 0x%X specified."
                                % (RegionData, Size))
                        #
                        # Put the generated image into FD buffer.
                        #
                        Buffer.write(FvBuffer.getvalue())
                        FvBuffer.close()
                        FvOffset = FvOffset + FvBufferLen
                        Size = Size - FvBufferLen
                        continue
                    else:
                        EdkLogger.error(
                            "GenFds", GENFDS_ERROR,
                            "FV (%s) is NOT described in FDF file!" %
                            (RegionData))
                #
                # Add the exist Fv image into FD buffer
                #
                if not Flag:
                    if FileName is not None:
                        FileLength = os.stat(FileName)[ST_SIZE]
                        if FileLength > Size:
                            EdkLogger.error("GenFds", GENFDS_ERROR,
                                            "Size of FV File (%s) is larger than Region Size 0x%X specified." \
                                            % (RegionData, Size))
                        BinFile = open(FileName, 'rb')
                        Buffer.write(BinFile.read())
                        BinFile.close()
                        Size = Size - FileLength
            #
            # Pad the left buffer
            #
            if not Flag:
                self.PadBuffer(Buffer, ErasePolarity, Size)

        if self.RegionType == 'CAPSULE':
            #
            # Get Capsule from Capsule Dict
            #
            for RegionData in self.RegionDataList:
                if RegionData.endswith(".cap"):
                    RegionData = GenFdsGlobalVariable.MacroExtend(
                        RegionData, MacroDict)
                    GenFdsGlobalVariable.InfLogger(
                        '   Region CAPSULE Image Name = .cap : %s' %
                        RegionData)
                    if RegionData[1] != ':':
                        RegionData = mws.join(
                            GenFdsGlobalVariable.WorkSpaceDir, RegionData)
                    if not os.path.exists(RegionData):
                        EdkLogger.error("GenFds",
                                        FILE_NOT_FOUND,
                                        ExtraData=RegionData)

                    FileName = RegionData
                elif RegionData.upper() + 'cap' in ImageBinDict:
                    GenFdsGlobalVariable.InfLogger('   Region Name = CAPSULE')
                    FileName = ImageBinDict[RegionData.upper() + 'cap']
                else:
                    #
                    # Generate Capsule image and Put it into FD buffer
                    #
                    CapsuleObj = None
                    CapsuleDict = GenFdsGlobalVariable.FdfParser.Profile.CapsuleDict
                    if RegionData.upper() in CapsuleDict:
                        CapsuleObj = CapsuleDict[RegionData.upper()]

                    if CapsuleObj is not None:
                        CapsuleObj.CapsuleName = RegionData.upper()
                        GenFdsGlobalVariable.InfLogger(
                            '   Region Name = CAPSULE')
                        #
                        # Call GenFv tool to generate Capsule Image
                        #
                        FileName = CapsuleObj.GenCapsule()
                        CapsuleObj.CapsuleName = None
                    else:
                        EdkLogger.error(
                            "GenFds", GENFDS_ERROR,
                            "Capsule (%s) is NOT described in FDF file!" %
                            (RegionData))

                #
                # Add the capsule image into FD buffer
                #
                FileLength = os.stat(FileName)[ST_SIZE]
                if FileLength > Size:
                    EdkLogger.error("GenFds", GENFDS_ERROR,
                                    "Size 0x%X of Capsule File (%s) is larger than Region Size 0x%X specified." \
                                    % (FileLength, RegionData, Size))
                BinFile = open(FileName, 'rb')
                Buffer.write(BinFile.read())
                BinFile.close()
                Size = Size - FileLength
            #
            # Pad the left buffer
            #
            self.PadBuffer(Buffer, ErasePolarity, Size)

        if self.RegionType in ('FILE', 'INF'):
            for RegionData in self.RegionDataList:
                if self.RegionType == 'INF':
                    RegionData.__InfParse__(None)
                    if len(RegionData.BinFileList) != 1:
                        EdkLogger.error(
                            'GenFds', GENFDS_ERROR,
                            'INF in FD region can only contain one binary: %s'
                            % RegionData)
                    File = RegionData.BinFileList[0]
                    RegionData = RegionData.PatchEfiFile(File.Path, File.Type)
                else:
                    RegionData = GenFdsGlobalVariable.MacroExtend(
                        RegionData, MacroDict)
                    if RegionData[1] != ':':
                        RegionData = mws.join(
                            GenFdsGlobalVariable.WorkSpaceDir, RegionData)
                    if not os.path.exists(RegionData):
                        EdkLogger.error("GenFds",
                                        FILE_NOT_FOUND,
                                        ExtraData=RegionData)
                #
                # Add the file image into FD buffer
                #
                FileLength = os.stat(RegionData)[ST_SIZE]
                if FileLength > Size:
                    EdkLogger.error("GenFds", GENFDS_ERROR,
                                    "Size of File (%s) is larger than Region Size 0x%X specified." \
                                    % (RegionData, Size))
                GenFdsGlobalVariable.InfLogger('   Region File Name = %s' %
                                               RegionData)
                BinFile = open(RegionData, 'rb')
                Buffer.write(BinFile.read())
                BinFile.close()
                Size = Size - FileLength
            #
            # Pad the left buffer
            #
            self.PadBuffer(Buffer, ErasePolarity, Size)

        if self.RegionType == 'DATA':
            GenFdsGlobalVariable.InfLogger('   Region Name = DATA')
            DataSize = 0
            for RegionData in self.RegionDataList:
                Data = RegionData.split(',')
                DataSize = DataSize + len(Data)
                if DataSize > Size:
                    EdkLogger.error(
                        "GenFds", GENFDS_ERROR,
                        "Size of DATA is larger than Region Size ")
                else:
                    for item in Data:
                        Buffer.write(pack('B', int(item, 16)))
                Size = Size - DataSize
            #
            # Pad the left buffer
            #
            self.PadBuffer(Buffer, ErasePolarity, Size)

        if self.RegionType is None:
            GenFdsGlobalVariable.InfLogger('   Region Name = None')
            self.PadBuffer(Buffer, ErasePolarity, Size)
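
Each branch above finishes by calling self.PadBuffer to fill whatever is left of the region. PadBuffer itself is not shown in this excerpt; a minimal sketch of what such a helper does, assuming erase polarity '1' means erased flash reads back as 0xFF:

from io import BytesIO

def pad_buffer(buffer, erase_polarity, size):
    # fill the unused tail of the region with the erased-flash value
    if size > 0:
        pad = b'\xff' if erase_polarity == '1' else b'\x00'
        buffer.write(pad * size)

buf = BytesIO()
buf.write(b'\x12\x34')        # region content so far
pad_buffer(buf, '1', 6)       # pad the remaining 6 bytes
assert buf.getvalue() == b'\x12\x34' + b'\xff' * 6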
Example #48
0
    def _generate_thumbnail(self):
        image = PIL.Image.open(self.image_path)

        # If no ExifTags, no rotating needed.
        try:
            # Grab orientation value.
            image_exif = image._getexif()
            image_orientation = image_exif[274]

            # Rotate depending on orientation.
            if image_orientation == 2:
                image = image.transpose(PIL.Image.FLIP_LEFT_RIGHT)
            if image_orientation == 3:
                image = image.transpose(PIL.Image.ROTATE_180)
            if image_orientation == 4:
                image = image.transpose(PIL.Image.FLIP_TOP_BOTTOM)
            if image_orientation == 5:
                image = image.transpose(PIL.Image.FLIP_LEFT_RIGHT).transpose(
                    PIL.Image.ROTATE_90)
            if image_orientation == 6:
                image = image.transpose(PIL.Image.ROTATE_270)
            if image_orientation == 7:
                image = image.transpose(PIL.Image.FLIP_TOP_BOTTOM).transpose(
                    PIL.Image.ROTATE_90)
            if image_orientation == 8:
                image = image.transpose(PIL.Image.ROTATE_90)
        except Exception:
            # No EXIF data or no orientation tag; nothing to rotate.
            pass

        # make thumbnails
        image.thumbnail(ownphotos.settings.THUMBNAIL_SIZE_BIG,
                        PIL.Image.ANTIALIAS)
        image_io_thumb = BytesIO()
        image.save(image_io_thumb, format="JPEG")
        self.thumbnail_big.save(self.image_hash + '.jpg',
                                ContentFile(image_io_thumb.getvalue()))
        image_io_thumb.close()

        square_thumb = ImageOps.fit(image,
                                    ownphotos.settings.THUMBNAIL_SIZE_BIG,
                                    PIL.Image.ANTIALIAS)
        image_io_square_thumb = BytesIO()
        square_thumb.save(image_io_square_thumb, format="JPEG")
        self.square_thumbnail_big.save(
            self.image_hash + '.jpg',
            ContentFile(image_io_square_thumb.getvalue()))
        image_io_square_thumb.close()

        image.thumbnail(ownphotos.settings.THUMBNAIL_SIZE_MEDIUM,
                        PIL.Image.ANTIALIAS)
        image_io_thumb = BytesIO()
        image.save(image_io_thumb, format="JPEG")
        self.thumbnail.save(self.image_hash + '.jpg',
                            ContentFile(image_io_thumb.getvalue()))
        image_io_thumb.close()

        square_thumb = ImageOps.fit(image,
                                    ownphotos.settings.THUMBNAIL_SIZE_MEDIUM,
                                    PIL.Image.ANTIALIAS)
        image_io_square_thumb = BytesIO()
        square_thumb.save(image_io_square_thumb, format="JPEG")
        self.square_thumbnail.save(
            self.image_hash + '.jpg',
            ContentFile(image_io_square_thumb.getvalue()))
        image_io_square_thumb.close()

        image.thumbnail(ownphotos.settings.THUMBNAIL_SIZE_SMALL,
                        PIL.Image.ANTIALIAS)
        image_io_thumb = BytesIO()
        image.save(image_io_thumb, format="JPEG")
        self.thumbnail_small.save(self.image_hash + '.jpg',
                                  ContentFile(image_io_thumb.getvalue()))
        image_io_thumb.close()

        square_thumb = ImageOps.fit(image,
                                    ownphotos.settings.THUMBNAIL_SIZE_SMALL,
                                    PIL.Image.ANTIALIAS)
        image_io_square_thumb = BytesIO()
        square_thumb.save(image_io_square_thumb, format="JPEG")
        self.square_thumbnail_small.save(
            self.image_hash + '.jpg',
            ContentFile(image_io_square_thumb.getvalue()))
        image_io_square_thumb.close()

        image.thumbnail(ownphotos.settings.THUMBNAIL_SIZE_TINY,
                        PIL.Image.ANTIALIAS)
        image_io_thumb = BytesIO()
        image.save(image_io_thumb, format="JPEG")
        self.thumbnail_tiny.save(self.image_hash + '.jpg',
                                 ContentFile(image_io_thumb.getvalue()))
        image_io_thumb.close()

        square_thumb = ImageOps.fit(image,
                                    ownphotos.settings.THUMBNAIL_SIZE_TINY,
                                    PIL.Image.ANTIALIAS)
        image_io_square_thumb = BytesIO()
        square_thumb.save(image_io_square_thumb, format="JPEG")
        self.square_thumbnail_tiny.save(
            self.image_hash + '.jpg',
            ContentFile(image_io_square_thumb.getvalue()))
        image_io_square_thumb.close()
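
The orientation handling in _generate_thumbnail can be made table-driven: EXIF tag 274 maps each orientation value to a fixed sequence of transposes. A sketch of that alternative, assuming Pillow (recent Pillow versions also ship PIL.ImageOps.exif_transpose, which does the same job):

import PIL.Image

# EXIF orientation (tag 274) -> transpose sequence that upright-orients the image
_ORIENTATION_TRANSPOSES = {
    2: [PIL.Image.FLIP_LEFT_RIGHT],
    3: [PIL.Image.ROTATE_180],
    4: [PIL.Image.FLIP_TOP_BOTTOM],
    5: [PIL.Image.FLIP_LEFT_RIGHT, PIL.Image.ROTATE_90],
    6: [PIL.Image.ROTATE_270],
    7: [PIL.Image.FLIP_TOP_BOTTOM, PIL.Image.ROTATE_90],
    8: [PIL.Image.ROTATE_90],
}

def apply_exif_orientation(image):
    try:
        orientation = image._getexif()[274]
    except (AttributeError, KeyError, TypeError):
        return image  # no EXIF data or no orientation tag
    for op in _ORIENTATION_TRANSPOSES.get(orientation, []):
        image = image.transpose(op)
    return image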
Example #49
0
class StreamMuxer:
    """StreamMuxer re-packages video/audio packets for output."""

    def __init__(
        self,
        hass: HomeAssistant,
        video_stream: av.video.VideoStream,
        audio_stream: av.audio.stream.AudioStream | None,
        stream_state: StreamState,
    ) -> None:
        """Initialize StreamMuxer."""
        self._hass = hass
        self._segment_start_dts: int = cast(int, None)
        self._memory_file: BytesIO = cast(BytesIO, None)
        self._av_output: av.container.OutputContainer = None
        self._input_video_stream: av.video.VideoStream = video_stream
        self._input_audio_stream: av.audio.stream.AudioStream | None = audio_stream
        self._output_video_stream: av.video.VideoStream = None
        self._output_audio_stream: av.audio.stream.AudioStream | None = None
        self._segment: Segment | None = None
        # the following 3 member variables are used for Part formation
        self._memory_file_pos: int = cast(int, None)
        self._part_start_dts: int = cast(int, None)
        self._part_has_keyframe = False
        self._stream_settings: StreamSettings = hass.data[DOMAIN][ATTR_SETTINGS]
        self._stream_state = stream_state
        self._start_time = datetime.datetime.utcnow()

    def make_new_av(
        self,
        memory_file: BytesIO,
        sequence: int,
        input_vstream: av.video.VideoStream,
        input_astream: av.audio.stream.AudioStream | None,
    ) -> tuple[
        av.container.OutputContainer,
        av.video.VideoStream,
        av.audio.stream.AudioStream | None,
    ]:
        """Make a new av OutputContainer and add output streams."""
        container = av.open(
            memory_file,
            mode="w",
            format=SEGMENT_CONTAINER_FORMAT,
            container_options={
                **{
                    # Removed skip_sidx - see https://github.com/home-assistant/core/pull/39970
                    # "cmaf" flag replaces several of the movflags used, but too recent to use for now
                    "movflags": "frag_custom+empty_moov+default_base_moof+frag_discont+negative_cts_offsets+skip_trailer",
                    # Sometimes the first segment begins with negative timestamps, and this setting just
                    # adjusts the timestamps in the output from that segment to start from 0. Helps from
                    # having to make some adjustments in test_durations
                    "avoid_negative_ts": "make_non_negative",
                    "fragment_index": str(sequence + 1),
                    "video_track_timescale": str(int(1 / input_vstream.time_base)),
                },
                # Only do extra fragmenting if we are using ll_hls
                # Let ffmpeg do the work using frag_duration
                # Fragment durations may exceed the 15% allowed variance but it seems ok
                **(
                    {
                        "movflags": "empty_moov+default_base_moof+frag_discont+negative_cts_offsets+skip_trailer",
                        # Create a fragment every TARGET_PART_DURATION. The data from each fragment is stored in
                        # a "Part" that can be combined with the data from all the other "Part"s, plus an init
                        # section, to reconstitute the data in a "Segment".
                        # The LL-HLS spec allows for a fragment's duration to be within the range [0.85x,1.0x]
                        # of the part target duration. We use the frag_duration option to tell ffmpeg to try to
                        # cut the fragments when they reach frag_duration. However, the resulting fragments can
                        # have variability in their durations and can end up being too short or too long. With a
                        # video track with no audio, the discrete nature of frames means that the frame at the
                        # end of a fragment will sometimes extend slightly beyond the desired frag_duration.
                        # If there are two tracks, as in the case of a video feed with audio, there is an added
                        # wrinkle as the fragment cut seems to be done on the first track that crosses the desired
                        # threshold, and cutting on the audio track may also result in a shorter video fragment
                        # than desired.
                        # Given this, our approach is to give ffmpeg a frag_duration somewhere in the middle
                        # of the range, hoping that the parts stay pretty well bounded, and we adjust the part
                        # durations a bit in the hls metadata so that everything "looks" ok.
                        "frag_duration": str(
                            self._stream_settings.part_target_duration * 9e5
                        ),
                    }
                    if self._stream_settings.ll_hls
                    else {}
                ),
            },
        )
        output_vstream = container.add_stream(template=input_vstream)
        # Check if audio is requested
        output_astream = None
        if input_astream:
            output_astream = container.add_stream(template=input_astream)
        return container, output_vstream, output_astream

    def reset(self, video_dts: int) -> None:
        """Initialize a new stream segment."""
        self._part_start_dts = self._segment_start_dts = video_dts
        self._segment = None
        self._memory_file = BytesIO()
        self._memory_file_pos = 0
        (
            self._av_output,
            self._output_video_stream,
            self._output_audio_stream,
        ) = self.make_new_av(
            memory_file=self._memory_file,
            sequence=self._stream_state.next_sequence(),
            input_vstream=self._input_video_stream,
            input_astream=self._input_audio_stream,
        )
        if self._output_video_stream.name == "hevc":
            self._output_video_stream.codec_tag = "hvc1"

    def mux_packet(self, packet: av.Packet) -> None:
        """Mux a packet to the appropriate output stream."""

        # Check for end of segment
        if packet.stream == self._input_video_stream:
            if (
                packet.is_keyframe
                and (packet.dts - self._segment_start_dts) * packet.time_base
                >= self._stream_settings.min_segment_duration
            ):
                # Flush segment (also flushes the stub part segment)
                self.flush(packet, last_part=True)

            # Mux the packet
            packet.stream = self._output_video_stream
            self._av_output.mux(packet)
            self.check_flush_part(packet)
            self._part_has_keyframe |= packet.is_keyframe

        elif packet.stream == self._input_audio_stream:
            packet.stream = self._output_audio_stream
            self._av_output.mux(packet)

    def check_flush_part(self, packet: av.Packet) -> None:
        """Check for and mark a part segment boundary and record its duration."""
        if self._memory_file_pos == self._memory_file.tell():
            return
        if self._segment is None:
            # We have our first non-zero byte position. This means the init has just
            # been written. Create a Segment and put it to the queue of each output.
            self._segment = Segment(
                sequence=self._stream_state.sequence,
                stream_id=self._stream_state.stream_id,
                init=self._memory_file.getvalue(),
                # Fetch the latest StreamOutputs, which may have changed since the
                # worker started.
                stream_outputs=self._stream_state.outputs,
                start_time=self._start_time,
            )
            self._memory_file_pos = self._memory_file.tell()
        else:  # These are the ends of the part segments
            self.flush(packet, last_part=False)

    def flush(self, packet: av.Packet, last_part: bool) -> None:
        """Output a part from the most recent bytes in the memory_file.

        If last_part is True, also close the segment, give it a duration,
        and clean up the av_output and memory_file.
        There are two different ways to enter this function, and when
        last_part is True, packet has not yet been muxed, while when
        last_part is False, the packet has already been muxed. However,
        in both cases, packet is the next packet and is not included in
        the Part.
        This function writes the duration metadata for the Part and
        for the Segment. However, as the fragmentation done by ffmpeg
        may result in fragment durations which fall outside the
        [0.85x,1.0x] tolerance band allowed by LL-HLS, we need to fudge
        some durations a bit by reporting them as being within that
        range.
        Note that repeated adjustments may cause drift between the part
        durations in the metadata and those in the media and result in
        playback issues in some clients.
        """
        # Part durations should not exceed the part target duration
        adjusted_dts = min(
            packet.dts,
            self._part_start_dts
            + self._stream_settings.part_target_duration / packet.time_base,
        )
        if last_part:
            # Closing the av_output will write the remaining buffered data to the
            # memory_file as a new moof/mdat.
            self._av_output.close()
        elif not self._part_has_keyframe:
            # Parts which are not the last part or an independent part should
            # not have durations below 0.85 of the part target duration.
            adjusted_dts = max(
                adjusted_dts,
                self._part_start_dts
                + 0.85 * self._stream_settings.part_target_duration / packet.time_base,
            )
        assert self._segment
        self._memory_file.seek(self._memory_file_pos)
        self._hass.loop.call_soon_threadsafe(
            self._segment.async_add_part,
            Part(
                duration=float(
                    (adjusted_dts - self._part_start_dts) * packet.time_base
                ),
                has_keyframe=self._part_has_keyframe,
                data=self._memory_file.read(),
            ),
            (
                segment_duration := float(
                    (adjusted_dts - self._segment_start_dts) * packet.time_base
                )
            )
            if last_part
            else 0,
        )
        if last_part:
            # If we've written the last part, we can close the memory_file.
            self._memory_file.close()  # We don't need the BytesIO object anymore
            self._start_time += datetime.timedelta(seconds=segment_duration)
            # Reinitialize
            self.reset(packet.dts)
        else:
            # For the last part, these will get set again elsewhere so we can skip
            # setting them here.
            self._memory_file_pos = self._memory_file.tell()
            self._part_start_dts = adjusted_dts
        self._part_has_keyframe = False

    def close(self) -> None:
        """Close stream buffer."""
        self._av_output.close()
        self._memory_file.close()
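
The duration fudging in flush() is easier to follow with concrete numbers. A small worked check, assuming an illustrative 90 kHz video time base and a 1-second part target duration:

from fractions import Fraction

time_base = Fraction(1, 90000)           # 90 kHz video track
part_target = Fraction(1)                # 1 second part target duration
part_start_dts = 0

# make_new_av asks ffmpeg to cut fragments at 0.9x of the target, in microseconds
assert float(part_target) * 9e5 == 900000.0

# a late packet would overshoot the 1.0x upper bound, so its dts is clamped down
packet_dts = 99000                       # 1.1 s after the part started
adjusted = min(packet_dts, part_start_dts + part_target / time_base)
assert adjusted * time_base == 1         # reported duration: exactly the target

# an early packet on a part without a keyframe is pushed up to the 0.85x floor
packet_dts = 72000                       # 0.8 s
adjusted = max(packet_dts,
               part_start_dts + Fraction(85, 100) * part_target / time_base)
assert adjusted * time_base == Fraction(85, 100)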
Example #50
0
class XportReader(abc.Iterator):
    __doc__ = _xport_reader_doc

    def __init__(self,
                 filepath_or_buffer,
                 index=None,
                 encoding="ISO-8859-1",
                 chunksize=None):

        self._encoding = encoding
        self._lines_read = 0
        self._index = index
        self._chunksize = chunksize

        if isinstance(filepath_or_buffer, str):
            (
                filepath_or_buffer,
                encoding,
                compression,
                should_close,
            ) = get_filepath_or_buffer(filepath_or_buffer, encoding=encoding)

        if isinstance(filepath_or_buffer, (str, bytes)):
            self.filepath_or_buffer = open(filepath_or_buffer, "rb")
        else:
            # Copy to BytesIO, and ensure no encoding
            contents = filepath_or_buffer.read()
            try:
                contents = contents.encode(self._encoding)
            except UnicodeEncodeError:
                pass
            self.filepath_or_buffer = BytesIO(contents)

        self._read_header()

    def close(self):
        self.filepath_or_buffer.close()

    def _get_row(self):
        return self.filepath_or_buffer.read(80).decode()

    def _read_header(self):
        self.filepath_or_buffer.seek(0)

        # read file header
        line1 = self._get_row()
        if line1 != _correct_line1:
            self.close()
            raise ValueError("Header record is not an XPORT file.")

        line2 = self._get_row()
        fif = [["prefix", 24], ["version", 8], ["OS", 8], ["_", 24],
               ["created", 16]]
        file_info = _split_line(line2, fif)
        if file_info["prefix"] != "SAS     SAS     SASLIB":
            self.close()
            raise ValueError("Header record has invalid prefix.")
        file_info["created"] = _parse_date(file_info["created"])
        self.file_info = file_info

        line3 = self._get_row()
        file_info["modified"] = _parse_date(line3[:16])

        # read member header
        header1 = self._get_row()
        header2 = self._get_row()
        headflag1 = header1.startswith(_correct_header1)
        headflag2 = header2 == _correct_header2
        if not (headflag1 and headflag2):
            self.close()
            raise ValueError("Member header not found")
        # usually 140, could be 135
        fieldnamelength = int(header1[-5:-2])

        # member info
        mem = [
            ["prefix", 8],
            ["set_name", 8],
            ["sasdata", 8],
            ["version", 8],
            ["OS", 8],
            ["_", 24],
            ["created", 16],
        ]
        member_info = _split_line(self._get_row(), mem)
        mem = [["modified", 16], ["_", 16], ["label", 40], ["type", 8]]
        member_info.update(_split_line(self._get_row(), mem))
        member_info["modified"] = _parse_date(member_info["modified"])
        member_info["created"] = _parse_date(member_info["created"])
        self.member_info = member_info

        # read field names
        types = {1: "numeric", 2: "char"}
        fieldcount = int(self._get_row()[54:58])
        datalength = fieldnamelength * fieldcount
        # round up to nearest 80
        if datalength % 80:
            datalength += 80 - datalength % 80
        fielddata = self.filepath_or_buffer.read(datalength)
        fields = []
        obs_length = 0
        while len(fielddata) >= fieldnamelength:
            # pull data for one field
            field, fielddata = (
                fielddata[:fieldnamelength],
                fielddata[fieldnamelength:],
            )

            # rest at end gets ignored, so if field is short, pad out
            # to match struct pattern below
            field = field.ljust(140)

            fieldstruct = struct.unpack(">hhhh8s40s8shhh2s8shhl52s", field)
            field = dict(zip(_fieldkeys, fieldstruct))
            del field["_"]
            field["ntype"] = types[field["ntype"]]
            fl = field["field_length"]
            if field["ntype"] == "numeric" and ((fl < 2) or (fl > 8)):
                self.close()
                msg = f"Floating field width {fl} is not between 2 and 8."
                raise TypeError(msg)

            for k, v in field.items():
                try:
                    field[k] = v.strip()
                except AttributeError:
                    pass

            obs_length += field["field_length"]
            fields += [field]

        header = self._get_row()
        if header != _correct_obs_header:
            self.close()
            raise ValueError("Observation header not found.")

        self.fields = fields
        self.record_length = obs_length
        self.record_start = self.filepath_or_buffer.tell()

        self.nobs = self._record_count()
        self.columns = [x["name"].decode() for x in self.fields]

        # Setup the dtype.
        dtypel = [("s" + str(i), "S" + str(field["field_length"]))
                  for i, field in enumerate(self.fields)]
        dtype = np.dtype(dtypel)
        self._dtype = dtype

    def __next__(self):
        return self.read(nrows=self._chunksize or 1)

    def _record_count(self) -> int:
        """
        Get number of records in file.

        This is maybe suboptimal because we have to seek to the end of
        the file.

        Side effect: returns file position to record_start.
        """
        self.filepath_or_buffer.seek(0, 2)
        total_records_length = (self.filepath_or_buffer.tell() -
                                self.record_start)

        if total_records_length % 80 != 0:
            warnings.warn("xport file may be corrupted")

        if self.record_length > 80:
            self.filepath_or_buffer.seek(self.record_start)
            return total_records_length // self.record_length

        self.filepath_or_buffer.seek(-80, 2)
        last_card = self.filepath_or_buffer.read(80)
        last_card = np.frombuffer(last_card, dtype=np.uint64)

        # 8 byte blank
        ix = np.flatnonzero(last_card == 2314885530818453536)

        if len(ix) == 0:
            tail_pad = 0
        else:
            tail_pad = 8 * len(ix)

        self.filepath_or_buffer.seek(self.record_start)

        return (total_records_length - tail_pad) // self.record_length

    def get_chunk(self, size=None):
        """
        Reads lines from Xport file and returns as dataframe

        Parameters
        ----------
        size : int, defaults to None
            Number of lines to read.  If None, reads whole file.

        Returns
        -------
        DataFrame
        """
        if size is None:
            size = self._chunksize
        return self.read(nrows=size)

    def _missing_double(self, vec):
        v = vec.view(dtype="u1,u1,u2,u4")
        miss = (v["f1"] == 0) & (v["f2"] == 0) & (v["f3"] == 0)
        miss1 = (((v["f0"] >= 0x41) & (v["f0"] <= 0x5A))
                 | (v["f0"] == 0x5F)
                 | (v["f0"] == 0x2E))
        miss &= miss1
        return miss

    @Appender(_read_method_doc)
    def read(self, nrows=None):

        if nrows is None:
            nrows = self.nobs

        read_lines = min(nrows, self.nobs - self._lines_read)
        read_len = read_lines * self.record_length
        if read_len <= 0:
            self.close()
            raise StopIteration
        raw = self.filepath_or_buffer.read(read_len)
        data = np.frombuffer(raw, dtype=self._dtype, count=read_lines)

        df = pd.DataFrame(index=range(read_lines))
        for j, x in enumerate(self.columns):
            vec = data["s" + str(j)]
            ntype = self.fields[j]["ntype"]
            if ntype == "numeric":
                vec = _handle_truncated_float_vec(
                    vec, self.fields[j]["field_length"])
                miss = self._missing_double(vec)
                v = _parse_float_vec(vec)
                v[miss] = np.nan
            elif self.fields[j]["ntype"] == "char":
                v = [y.rstrip() for y in vec]

                if self._encoding is not None:
                    v = [y.decode(self._encoding) for y in v]

            df[x] = v

        if self._index is None:
            df.index = range(self._lines_read, self._lines_read + read_lines)
        else:
            df = df.set_index(self._index)

        self._lines_read += read_lines

        return df
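
The magic number in _record_count is less mysterious than it looks: 2314885530818453536 is just eight ASCII spaces viewed as an unsigned 64-bit integer, which is how an all-blank 8-byte pad at the end of an xport file appears to np.frombuffer. A quick check:

import numpy as np

blank8 = b' ' * 8
assert int.from_bytes(blank8, 'little') == 2314885530818453536
assert hex(2314885530818453536) == '0x2020202020202020'   # 0x20 == ord(' ')

# the same comparison the reader makes on the last 80-byte card
last_card = b'last record bytes'.ljust(72, b'\x00') + blank8
ix = np.flatnonzero(np.frombuffer(last_card, dtype=np.uint64) == 2314885530818453536)
assert len(ix) == 1    # exactly one blank 8-byte block found in the tail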
Example #51
0
def pdfEtiquetaEndereco(request):
    """
    Generate a PDF file with the requested labels
    """

    if 'dataEtiquetaEndereco' not in request.session:
        return redirect('etiqueta.etiqueta_endereco')

    data = request.session['dataEtiquetaEndereco']
    if not data:
        return redirect('etiqueta.etiqueta_endereco')

    request.session.pop('dataEtiquetaEndereco')

    if settings.DEBUG:
        logo_company = settings.BASE_DIR + '/onyxlog/core/static/img/logo_company_label.jpg'
        logo_company2 = settings.BASE_DIR + '/onyxlog/core/static/img/logo_company_label2.jpg'
    else:
        logo_company = settings.STATIC_ROOT + '/img/logo_company_label.jpg'
        logo_company2 = settings.STATIC_ROOT + '/img/logo_company_label2.jpg'

    response = HttpResponse(content_type='application/pdf')
    response[
        'Content-Disposition'] = 'attachment; filename="etiqueta_endereco.pdf"'

    buffer = BytesIO()

    p = canvas.Canvas(buffer, pagesize=(378, 264))

    for endereco in data:
        # header
        p.drawImage(
            logo_company,
            5,
            228,
        )
        p.drawImage(
            logo_company2,
            292,
            228,
        )
        p.drawCentredString(193, 249, "Etiqueta de Identificação.")
        p.drawCentredString(193, 231, "de Endereço.")

        # detail labels
        p.setFontSize(7)
        p.rect(5, 191, 365, 31, fill=0)
        p.drawString(12, 214, "Planta")

        p.rect(5, 129, 365, 62, fill=0)
        p.drawString(12, 183, "Endereço")

        # barcode box
        p.rect(5, 10, 365, 119, fill=0)

        # print the data
        p.setFontSize(16)
        p.drawString(17, 198, endereco['planta'])

        p.setFontSize(22)
        p.drawCentredString(188, 151, endereco['codigo'])

        # barcode
        barcode = code128.Code128(endereco['codigo'],
                                  barWidth=0.5 * mm,
                                  barHeight=30 * mm)
        barcode.drawOn(p, 50, 35)
        p.showPage()

    p.save()

    pdf = buffer.getvalue()
    buffer.close()
    response.write(pdf)
    return response
Example #52
0
    def plot_strain_curve(self, axv=0):
        # start plotting
        self.fig, self.ax = plt.subplots()
        # plt.figure()
        plt.xlabel('frame')
        if self.json_para['curve_bound'] != 0:
            plt.ylim(-self.json_para['curve_bound'],
                     self.json_para['curve_bound'])

        for i in self.cv2_gui.result_distance.keys():

            # grab the matching color and convert it to matplotlib's 0-1 RGB format
            color = tuple([
                self.cv2_gui.colors[i % self.cv2_gui.num_of_color][-j] / 255
                for j in range(1, 4)
            ])

            if self.radioButton_strain.isChecked():
                plt.axhline(0, color='k', alpha=0.2)
                if self.radioButton_spline.isChecked():
                    plt.plot(gui_tool.lsq_spline_medain(
                        self.cv2_gui.result_strain[i]),
                             color=color)
                elif self.radioButton_original.isChecked():
                    plt.plot(self.cv2_gui.result_strain[i], color=color)

                plt.ylabel('Strain')
                plt.title('Strain curve')

            elif self.radioButton_distance.isChecked():
                if self.radioButton_spline.isChecked():
                    plt.plot(gui_tool.lsq_spline_medain(
                        self.cv2_gui.result_distance[i]),
                             color=color)
                elif self.radioButton_original.isChecked():
                    plt.plot(self.cv2_gui.result_distance[i], color=color)

                plt.ylabel('Distance')
                plt.title('Distance curve')

        if axv != 0:
            plt.axvline(axv, color='k', alpha=0.2)

        # allocate an in-memory buffer
        buffer_ = BytesIO()

        # save into memory instead of to local disk; note this assumes the current plt figure is what should be saved
        plt.savefig(buffer_, format='png')
        plt.close()
        buffer_.seek(0)

        # read it back from memory with PIL (or cv2)
        dataPIL = PIL.Image.open(buffer_)

        # convert to an ndarray; conversion from PIL is fast, and this array is all we need
        self.result_curve_temp = cv2.cvtColor(np.asarray(dataPIL),
                                              cv2.COLOR_BGR2RGB)

        # display
        self.label_show_curve.setPixmap(
            QtGui.QPixmap(gui_tool.convert2qtimg(self.result_curve_temp)))
        self.label_show_curve.setScaledContents(True)

        # release the buffer
        buffer_.close()
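
If the goal is just to turn the saved figure into an OpenCV-style array, the PIL round-trip can also be skipped: cv2.imdecode will decode the PNG bytes straight into a BGR ndarray. A minimal standalone sketch (the figure here is only an example):

from io import BytesIO

import cv2
import matplotlib
matplotlib.use('Agg')                 # headless backend for this sketch
import matplotlib.pyplot as plt
import numpy as np

plt.plot([0, 1, 2], [0, 1, 4])
buf = BytesIO()
plt.savefig(buf, format='png')
plt.close()

# decode the in-memory PNG directly; OpenCV returns BGR channel order
img_bgr = cv2.imdecode(np.frombuffer(buf.getvalue(), np.uint8), cv2.IMREAD_COLOR)
buf.close()
print(img_bgr.shape)                  # (height, width, 3)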
Example #53
0
    def send_webcam_frame(self):
        if not self.webcam_lock.acquire(False):
            return False
        log("send_webcam_frame() webcam_device=%s", self.webcam_device)
        try:
            assert self.webcam_device_no >= 0, "device number is not set"
            assert self.webcam_device, "no webcam device to capture from"
            from xpra.codecs.pillow.encoder import get_encodings
            client_webcam_encodings = get_encodings()
            common_encodings = list(
                set(self.server_webcam_encodings).intersection(
                    client_webcam_encodings))
            log("common encodings (server=%s, client=%s): %s",
                csv(self.server_encodings), csv(client_webcam_encodings),
                csv(common_encodings))
            if not common_encodings:
                log.error("Error: cannot send webcam image, no common formats")
                log.error(" the server supports: %s",
                          csv(self.server_webcam_encodings))
                log.error(" the client supports: %s",
                          csv(client_webcam_encodings))
                self.stop_sending_webcam()
                return False
            preferred_order = ["jpeg", "png", "png/L", "png/P", "webp"]
            formats = [x for x in preferred_order
                       if x in common_encodings] + common_encodings
            encoding = formats[0]
            start = monotonic()
            import cv2
            ret, frame = self.webcam_device.read()
            assert ret, "capture failed"
            assert frame.ndim == 3, "invalid frame data"
            h, w, Bpp = frame.shape
            assert Bpp == 3 and frame.size == w * h * Bpp
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # @UndefinedVariable
            end = monotonic()
            log("webcam frame capture took %ims", (end - start) * 1000)
            start = monotonic()
            from PIL import Image
            from io import BytesIO
            image = Image.fromarray(rgb)
            buf = BytesIO()
            image.save(buf, format=encoding)
            data = buf.getvalue()
            buf.close()
            end = monotonic()
            log("webcam frame compression to %s took %ims", encoding,
                (end - start) * 1000)
            frame_no = self.webcam_frame_no
            self.webcam_frame_no += 1
            self.send("webcam-frame", self.webcam_device_no, frame_no,
                      encoding, w, h, compression.Compressed(encoding, data))
            self.cancel_webcam_check_ack_timer()
            self.webcam_ack_check_timer = self.timeout_add(
                10 * 1000, self.webcam_check_acks)
            return True
        except Exception as e:
            log.error("webcam frame %i failed",
                      self.webcam_frame_no,
                      exc_info=True)
            log.error("Error sending webcam frame: %s", e)
            self.stop_sending_webcam()
            summary = "Webcam forwarding has failed"
            body = "The system encountered the following error:\n" + \
                ("%s\n" % e)
            self.may_notify(XPRA_WEBCAM_NOTIFICATION_ID,
                            summary,
                            body,
                            expire_timeout=10 * 1000,
                            icon_name="webcam")
            return False
        finally:
            self.webcam_lock.release()
Example #54
0
def _convert_table_to_png(table_html: str) -> Optional[bytes]:
    soup = BeautifulSoup(table_html, 'lxml')
    table = soup.find('table')
    if not table:
        return None
    wrap_length = 85
    column_labels: list[str] = []
    row_labels: list[str] = []
    cell_texts: list[list[str]] = []
    thead = table.find('thead')
    try:
        if thead:
            column_labels = [label.text for label in thead.find_all('th')]
            thead.decompose()
        else:
            maybe_thead = table.find('tr')
            if maybe_thead:
                ths = maybe_thead.find_all('th')
                if len(ths) > 1:
                    column_labels = [label.text for label in ths]
                    maybe_thead.decompose()
        rows = table.find_all('tr')
        if rows:
            for tr in rows:
                th = tr.find('th')
                if th:
                    row_labels.append(th.text)
                cell_texts.append(
                    [cell.text for cell in tr.find_all('td')])
        if not cell_texts:
            if column_labels:
                cell_texts.append(column_labels)
                column_labels = row_labels = []
            elif row_labels:
                cell_texts = [[label] for label in row_labels]
                column_labels = row_labels = []
            else:
                return None
        # ensure row number and column number
        max_columns = max(max(len(row) for row in cell_texts),
                          len(column_labels))
        max_rows = max(len(cell_texts), len(row_labels))
        if min(max_columns, max_rows) == 0:
            return None
        if column_labels and len(column_labels) < max_columns:
            column_labels += [''] * (max_columns - len(column_labels))
        if row_labels and len(row_labels) < max_rows:
            row_labels += [''] * (max_rows - len(row_labels))
        if len(cell_texts) < max_rows:
            cell_texts += [[''] * max_columns] * (max_rows - len(cell_texts))
        wrap_length = max(wrap_length // max_columns, 10)
        for i, row in enumerate(cell_texts):
            cell_texts[i] = [fill(cell, wrap_length) for cell in row]
        for i, label in enumerate(column_labels):
            column_labels[i] = fill(label, wrap_length)
        for i, label in enumerate(row_labels):
            row_labels[i] = fill(label, wrap_length)

        auto_set_column_width_flag = True
        for _ in range(2):
            try:
                # draw table
                table = ax.table(cellText=cell_texts,
                                 rowLabels=row_labels or None,
                                 colLabels=column_labels or None,
                                 loc='center',
                                 cellLoc='center',
                                 rowLoc='center')
                if auto_set_column_width_flag:
                    table.auto_set_column_width(tuple(range(max_columns)))
                # set row height
                cell_d = table.get_celld()
                row_range = {xy[0] for xy in cell_d}
                column_range = {xy[1] for xy in cell_d}
                row_heights = {
                    row:
                    max(cell.get_height() *
                        (cell.get_text().get_text().count('\n') + 1) * 0.75 +
                        cell.get_height() * 0.25
                        for cell in (cell_d[row, column]
                                     for column in column_range))
                    for row in row_range
                }
                for xy, cell in cell_d.items():
                    cell.set_height(row_heights[xy[0]])
                fig.set_constrained_layout(True)
                ax.axis('off')
                plt_buffer = BytesIO()
                fig.savefig(plt_buffer, format='png', dpi=200)
            except UserWarning:
                # if auto_set_column_width_flag:
                #     auto_set_column_width_flag = False  # oops, overflowed!
                #     continue  # once a figure is exported, some stuff may be frozen, so we need to re-create the table
                return None
            except Exception as e:
                raise e
            finally:
                # noinspection PyBroadException
                try:
                    plt.cla()
                except Exception:
                    pass
            # crop
            # noinspection PyUnboundLocalVariable
            image = Image.open(plt_buffer)
            ori_width, ori_height = image.size
            # trim white border
            upper = left = 0
            lower, right = ori_height - 1, ori_width - 1
            while left + 1 < ori_width and upper + 1 < ori_height and image.getpixel(
                (left, upper))[0] >= 128:
                upper += 1
                left += 1
            while upper - 1 >= 0 and image.getpixel(
                (left, upper - 1))[0] < 128:
                upper -= 1
            while left - 1 >= 0 and image.getpixel((left - 1, upper))[0] < 128:
                left -= 1
            while right - 1 >= 0 and lower - 1 >= 0 and image.getpixel(
                (right, lower))[0] >= 128:
                lower -= 1
                right -= 1
            while lower + 1 < ori_height and image.getpixel(
                (right, lower + 1))[0] < 128:
                lower += 1
            while right + 1 < ori_width and image.getpixel(
                (right + 1, lower))[0] < 128:
                right += 1
            # add a slim border
            border_width = 15
            left = max(0, left - border_width)
            right = min(ori_width - 1, right + border_width)
            upper = max(0, upper - border_width)
            lower = min(ori_height - 1, lower + border_width)
            width, height = right - left, lower - upper
            # ensure aspect ratio
            max_aspect_ratio = 15
            if width / height > max_aspect_ratio:
                height = ceil(width / max_aspect_ratio)
                middle = int((upper + lower) / 2)
                upper = middle - height // 2
                lower = middle + height // 2
            elif height / width > max_aspect_ratio:
                width = ceil(height / max_aspect_ratio)
                middle = int((left + right) / 2)
                left = middle - width // 2
                right = middle + width // 2
            old_image = image
            image = image.crop((left, upper, right, lower))
            old_image.close()
            buffer = BytesIO()
            image.save(buffer, format='png')
            ret = buffer.getvalue()
            image.close()
            buffer.close()
            plt_buffer.close()
            return ret
    except Exception as e:
        logger.debug('Drawing table failed', exc_info=e)
        return None
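
The hand-rolled border walk above (note that fig and ax are module-level matplotlib objects created outside this excerpt) can usually be replaced by Pillow's own bounding-box helpers: diff the image against an all-white canvas and crop to the non-white region. A sketch of that alternative (trim_white_border is a hypothetical helper, and it trims only exact white, unlike the >=128 threshold in the original):

from PIL import Image, ImageChops

def trim_white_border(image, border=15):
    # everything that differs from pure white ends up inside getbbox()
    white = (255,) * len(image.getbands())
    diff = ImageChops.difference(image, Image.new(image.mode, image.size, white))
    bbox = diff.getbbox()
    if bbox is None:
        return image                      # blank image, nothing to trim
    left, upper, right, lower = bbox
    return image.crop((max(0, left - border),
                       max(0, upper - border),
                       min(image.width, right + border),
                       min(image.height, lower + border)))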
Example #55
0
File: ipg.py Project: rajul/ginga
def showplt():
    buf = BytesIO()
    plt.savefig(buf, bbox_inches=0)
    img = Image(data=bytes(buf.getvalue()), format='png', embed=True)
    buf.close()
    return img
Example #56
0
def pdfEtiquetaProduto(request):
    """
    Generate a PDF file with the requested labels
    """

    if 'dataEtiqueta' not in request.session:
        return redirect('etiqueta.etiqueta_produto')

    data = request.session['dataEtiqueta']
    if not data:
        return redirect('etiqueta.etiqueta_produto')

    request.session.pop('dataEtiqueta')

    if settings.DEBUG:
        logo_company = settings.BASE_DIR + '/onyxlog/core/static/img/logo_company_label.jpg'
        logo_company2 = settings.BASE_DIR + '/onyxlog/core/static/img/logo_company_label2.jpg'
    else:
        logo_company = settings.STATIC_ROOT + '/img/logo_company_label.jpg'
        logo_company2 = settings.STATIC_ROOT + '/img/logo_company_label2.jpg'

    response = HttpResponse(content_type='application/pdf')
    response[
        'Content-Disposition'] = 'attachment; filename="etiquetas_produto.pdf"'

    buffer = BytesIO()

    p = canvas.Canvas(buffer, pagesize=(378, 264))

    for produto in data:
        # header
        p.drawImage(
            logo_company,
            5,
            228,
        )
        p.drawImage(
            logo_company2,
            292,
            228,
        )
        p.drawString(122, 239, "Etiqueta de Identificação.")

        # detail labels
        p.setFontSize(7)
        p.rect(5, 191, 365, 31, fill=0)
        p.drawString(7, 214, "Produto")

        p.rect(5, 160, 80, 31, fill=0)
        p.drawString(7, 183, "UN")

        p.rect(85, 160, 90, 31, fill=0)
        p.drawString(88, 183, "Qtd")

        p.rect(175, 160, 90, 31, fill=0)
        p.drawString(178, 183, "Nota")

        p.rect(265, 160, 105, 31, fill=0)
        p.drawString(268, 183, "Pedido")

        p.rect(5, 129, 365, 31, fill=0)
        p.drawString(7, 152, "Fornecedor")

        p.rect(265, 129, 105, 31, fill=0)
        p.drawString(268, 152, "Validade")

        # barcode box
        p.rect(5, 10, 220, 119, fill=0)

        p.rect(225, 72, 145, 57, fill=0)
        p.drawString(227, 121, "Código do Produto")
        p.rect(225, 75, 145, 40, fill=1)

        p.rect(225, 41, 145, 31, fill=0)
        p.drawString(227, 65, "Endereço")

        p.rect(225, 10, 145, 31, fill=0)
        p.drawString(227, 34, "Recebimento")

        # print the data
        p.setFontSize(10)
        p.drawString(7, 200, produto['descricao'])

        p.setFontSize(16)
        p.drawString(17, 167, produto['un'])
        p.drawString(88, 167, produto['qtd'])
        p.drawString(178, 167, produto['nota'])
        p.drawString(268, 167, produto['pedido'])
        p.drawString(17, 136, produto['fornecedor'])
        p.drawString(268, 136, produto['validade'])
        p.drawString(242, 19, produto['recebimento'])

        # barcode
        barcode = code128.Code128(produto['codigo'],
                                  barWidth=0.5 * mm,
                                  barHeight=30 * mm)
        barcode.drawOn(p, 5, 35)
        p.setFontSize(10)
        p.drawCentredString(120, 15, produto['codigo'])

        p.setFontSize(16)
        p.setFillColorRGB(1, 1, 1)
        p.drawString(230, 90, produto['codigo'])

        p.setFontSize(14)
        p.setFillColorRGB(0, 0, 0)
        p.drawString(230, 50, produto['endereco'])
        p.showPage()

    p.save()

    pdf = buffer.getvalue()
    buffer.close()
    response.write(pdf)
    return response
Example #57
0
def main(argv):
    if argv.count('-a')>0:
        noDump=True
        argv.remove('-a')
    else:
        noDump=False
    if argv.count('-x')>0:
        xlsxMode=True
        import openpyxl
        argv.remove('-x')
    else:
        xlsxMode=False

    if len(argv)<2 or argv[1]=='':
        print ("Usage: "+argv[0][argv[0].rfind("\\")+1:]+" <dbs file> [-a/-x]")
        return False

    try:
        f=open(argv[1],'rb')
        f.read()
        f.close()
    except OSError:
        return False
    
    dbs=open(argv[1],'rb')
    head=dbs.read(4)
    if head==b'\x00\x00\x00\x00':
        isUTF=False
    else:
        isUTF=True
    data=dbs.read()
    dbs.close()
    dataA=Decrypt5(data)
    '''
    output=open(argv[1]+'.dec','wb')
    output.write(dataA)
    output.close()
    '''
    compSize,decompSize=struct.unpack('2I',dataA[:8])
    dataB=Decompress(dataA[8:],decompSize)
    dataC=Decrypt3(dataB)
    
    if xlsxMode:
        file=BytesIO(dataC)
    else:
        output=open(argv[1]+'.out','wb')
        output.write(dataC)
        output.close()
        file=open(argv[1]+'.out','rb')

    header=Header(file)
    file.seek(header.lineIndexOffset)
    lineIndex=struct.unpack('%di'%header.lineCount,file.read(header.lineCount*4))

    dataIndex=[]
    dataType=[]
    for n in range(0,header.dataCount):
        dataIndex.append(struct.unpack('I',file.read(4))[0])
        dataType.append(struct.unpack('I',file.read(4))[0])
    lineData=[]
    for m in range(0,header.lineCount):
        lineData.append([])
        for n in range(0,header.dataCount):
            tempData=struct.unpack('I',file.read(4))[0]
            if dataType[n]==0x53:
                tempTell=file.tell()
                file.seek(header.textOffset+tempData)
                tempString=b''
                if isUTF:
                    tempChar=file.read(2)
                    while tempChar!=b'\x00\x00' and tempChar!=b'':
                        tempString+=tempChar
                        tempChar=file.read(2)
                    lineData[m].append(tempString.decode("UTF-16"))
                else:
                    tempChar=file.read(1)
                    while tempChar!=b'\x00' and tempChar!=b'':
                        tempString+=tempChar
                        tempChar=file.read(1)
                    lineData[m].append(tempString.decode("Shift-JIS"))
                file.seek(tempTell)
            else:
                lineData[m].append(tempData)
    file.seek(header.fileSize)
    dummy=file.read()
    file.close()
    
    if xlsxMode:
        xls=argv[1]+".xlsx"
        workBook=openpyxl.Workbook()
        workSheet=workBook.active
        workSheet.title="Translation"
        workSheet.column_dimensions['A'].width=10
        tempIndex=["#DATANO"]
        tempType=["#DATATYPE"]
        for i,j in zip(dataIndex,dataType):
            tempIndex.append(i)
            if j==0x53:
                tempType.append("S")
            else:
                tempType.append("V")
        workSheet.append(tempIndex)
        workSheet.append(tempType)
        for l in range(0,header.lineCount):
            tempLine=[lineIndex[l],*lineData[l]]
            workSheet.append(tempLine)
        workSheetCopy=workBook.copy_worksheet(workSheet)
        workSheetCopy.title="Text"
        workBook.save(xls)
    else:
        txt=open(argv[1]+'.txt','w',1,"UTF-16")  
        if isUTF:
            txt.write('Unicode\n')
        else:
            txt.write('ASCII\n')
        for m in range(0,header.lineCount):
            txt.write('[%.4d]\n'%lineIndex[m])
            for n in range(0,header.dataCount):
                tempIndex=dataIndex[n]
                tempData=lineData[m][n]
                if dataType[n]==0x53 and (tempData!='' or noDump):
                    # String cell: write the source line and an identical
                    # copy on the next line for editing.
                    txt.write('○%.2d○'%tempIndex+tempData+'\n●%.2d●'%tempIndex+lineData[m][n]+'\n\n')
                elif dataType[n]==0x56 and noDump:
                    # Integer cell, dumped only when noDump is set.
                    txt.write('{%.2d}'%tempIndex+str(tempData)+'\n<%.2d>'%tempIndex+str(tempData)+'\n\n')
            txt.write('\n')
        txt.close()
        
    return True
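
The dumper above reads each string cell as an offset into a shared text block, scanning forward to a NUL terminator before decoding. Below is a minimal, self-contained sketch of that technique; the buffer contents are made-up illustration data, and UTF-16-LE is assumed where the tool above relies on plain "UTF-16".

from io import BytesIO

def read_cstring(fp, offset, utf16=False):
    # Remember the current position, jump to the string, and collect
    # code units until the (1- or 2-byte) NUL terminator or EOF.
    saved = fp.tell()
    fp.seek(offset)
    width = 2 if utf16 else 1
    terminator = b'\x00' * width
    raw = b''
    chunk = fp.read(width)
    while chunk not in (terminator, b''):
        raw += chunk
        chunk = fp.read(width)
    fp.seek(saved)  # restore the position, as the dumper above does
    return raw.decode('UTF-16-LE' if utf16 else 'Shift-JIS')

# Illustration data only: a single NUL-terminated UTF-16-LE string.
buf = BytesIO('ABC'.encode('UTF-16-LE') + b'\x00\x00')
assert read_cstring(buf, 0, utf16=True) == 'ABC'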
Example #58
0
    # start reading frame
    rCam1, frameCam1 = streamCam1.read()

    nowTime = time.time()
    if (int(nowTime - startTime)) >= fpsLimit:
        print("current time: " + str(nowTime))
        # to view the frames coming from the IP camera, use imshow():
        #cv2.imshow('IP Camera stream', frame)
        # to store the frame with a .jpg extension locally:
        #cv2.imwrite('C:/Users/zekeriyya/Desktop/NodeVideo/test/cam' + timeStr + '.jpg', frame)

        ## write frames data to FTP server
        ftp = FTP('DOMAIN-NAME', 'USERNAME', 'PASSWD')

        # store Cam1 frame on memory not local
        retval, buffer = cv2.imencode('.jpg', frameCam1)
        file = BytesIO(buffer)
        ftp.storbinary('STOR /httpdocs/cam1/camera1.jpg',
                       file)  # send the file
        file.close()  # close the in-memory buffer

        ftp.quit()  # close the FTP session

        startTime = time.time()  # reset time

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

streamCam1.release()
cv2.destroyAllWindows()
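
The core pattern here, encoding the frame to JPEG in memory and streaming the buffer straight to FTP without touching disk, can be wrapped in a small helper. A hedged sketch follows: the host, credentials, and remote path are placeholders, and ftplib.FTP is used as a context manager (Python 3.3+).

from ftplib import FTP
from io import BytesIO

import cv2

def upload_frame(frame, host, user, passwd, remote_path):
    # Encode the frame to an in-memory JPEG instead of a temp file.
    ok, jpeg = cv2.imencode('.jpg', frame)
    if not ok:
        return False
    # Both the buffer and the FTP session are closed on exit.
    with BytesIO(jpeg) as buf, FTP(host, user, passwd) as ftp:
        ftp.storbinary('STOR ' + remote_path, buf)
    return True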
Example #59
0
class Deflater(object):
    '''
    File-like interface to zlib compression. The data is actually not
    compressed unless the compressed form is smaller than the uncompressed
    data.
    '''
    def __init__(self, compress=True, compress_level=9):
        '''
        Initialize a Deflater. The compress argument selects the method:
        True maps to JAR_DEFLATED (zlib), False to JAR_STORED (no
        compression), and JAR_BROTLI may also be passed directly.
        '''
        self._data = BytesIO()
        if compress is True:
            compress = JAR_DEFLATED
        elif compress is False:
            compress = JAR_STORED
        self.compress = compress
        if compress in (JAR_DEFLATED, JAR_BROTLI):
            if compress == JAR_DEFLATED:
                self._deflater = zlib.compressobj(compress_level,
                                                  zlib.DEFLATED, -MAX_WBITS)
            else:
                self._deflater = BrotliCompress()
            self._deflated = BytesIO()
        else:
            assert compress == JAR_STORED
            self._deflater = None
        self.crc32 = 0

    def write(self, data):
        '''
        Append a buffer to the Deflater.
        '''
        if isinstance(data, memoryview):
            data = data.tobytes()
        data = six.ensure_binary(data)
        self._data.write(data)

        if self.compress:
            if self._deflater:
                self._deflated.write(self._deflater.compress(data))
            else:
                raise JarWriterError("Can't write after flush")

        self.crc32 = zlib.crc32(data, self.crc32) & 0xffffffff

    def close(self):
        '''
        Close the Deflater.
        '''
        self._data.close()
        if self.compress:
            self._deflated.close()

    def _flush(self):
        '''
        Flush the underlying zlib compression object.
        '''
        if self.compress and self._deflater:
            self._deflated.write(self._deflater.flush())
            self._deflater = None

    @property
    def compressed(self):
        '''
        Return whether the data should be compressed.
        '''
        return self._compressed_size < self.uncompressed_size

    @property
    def _compressed_size(self):
        '''
        Return the real compressed size of the data written to the Deflater. If
        the Deflater is set not to compress, the uncompressed size is returned.
        Otherwise, the actual compressed size is returned, whether or not it is
        a win over the uncompressed size.
        '''
        if self.compress:
            self._flush()
            return self._deflated.tell()
        return self.uncompressed_size

    @property
    def compressed_size(self):
        '''
        Return the compressed size of the data written to the Deflater. If the
        Deflater is set not to compress, the uncompressed size is returned.
        Otherwise, if the data should not be compressed (the real compressed
        size is bigger than the uncompressed size), return the uncompressed
        size.
        '''
        if self.compressed:
            return self._compressed_size
        return self.uncompressed_size

    @property
    def uncompressed_size(self):
        '''
        Return the size of the data written to the Deflater.
        '''
        return self._data.tell()

    @property
    def compressed_data(self):
        '''
        Return the compressed data, if the data should be compressed (real
        compressed size smaller than the uncompressed size), or the
        uncompressed data otherwise.
        '''
        if self.compressed:
            return self._deflated.getvalue()
        return self._data.getvalue()
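
A brief usage sketch of the class above. The import path is an assumption (this Deflater appears to come from mozilla-central's mozpack/mozjar.py); adjust it to wherever the class is actually defined.

from mozpack.mozjar import Deflater  # hypothetical import path

deflater = Deflater(compress=True)  # True selects zlib (JAR_DEFLATED)
deflater.write(b'some highly repetitive payload ' * 64)
print(deflater.uncompressed_size, deflater.compressed_size)
# compressed_data hands back the deflated bytes only when deflation
# actually made the data smaller; otherwise it returns the raw data.
payload = deflater.compressed_data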
Example #60
0
    def read(self, extension=None, quality=None):  # NOQA
        # returns the rendered image buffer as bytes

        img_buffer = BytesIO()
        requested_extension = extension or self.extension

        # 1 and P mode images will be much smaller if converted back to
        # their original mode, so do that after resizing.
        if self.context.config.PILLOW_PRESERVE_INDEXED_MODE and requested_extension in [None, '.png', '.gif'] \
                and self.original_mode in ['P', '1'] and self.original_mode != self.image.mode:
            if self.original_mode == '1':
                self.image = self.image.convert('1')
            else:
                # libimagequant might not be enabled on compile time
                # but it's better than default octree for RGBA images, so worth a try
                quantize_default = True
                try:
                    # Option available since Pillow 3.3.0
                    if hasattr(Image, 'LIBIMAGEQUANT'):
                        self.image = self.image.quantize(
                            method=Image.LIBIMAGEQUANT)
                        quantize_default = False
                except ValueError as ex:
                    if 'dependency' not in str(ex).lower():
                        raise

                if quantize_default:
                    self.image = self.image.quantize()

        ext = requested_extension or self.get_default_extension()

        options = {'quality': quality}
        if ext == '.jpg' or ext == '.jpeg':
            options['optimize'] = True
            if self.context.config.PROGRESSIVE_JPEG:
                # Can't simply set options['progressive'] to the value
                # of self.context.config.PROGRESSIVE_JPEG because save
                # operates on the presence of the key in **options, not
                # the value of that setting.
                options['progressive'] = True

            if self.image.mode != 'RGB':
                self.image = self.image.convert('RGB')
            else:
                subsampling_config = self.context.config.PILLOW_JPEG_SUBSAMPLING
                qtables_config = self.context.config.PILLOW_JPEG_QTABLES

                if subsampling_config is not None or qtables_config is not None:
                    # Can't use 'keep' here, as Pillow would try to
                    # extract qtables/subsampling and fail.
                    options['quality'] = 0
                    orig_subsampling = self.subsampling
                    orig_qtables = self.qtables

                    if subsampling_config in (None, 'keep') and orig_subsampling is not None:
                        options['subsampling'] = orig_subsampling
                    else:
                        options['subsampling'] = subsampling_config

                    if qtables_config in (None, 'keep') and orig_qtables and 2 <= len(orig_qtables) <= 4:
                        options['qtables'] = orig_qtables
                    else:
                        options['qtables'] = qtables_config

        if ext == '.png' and self.context.config.PNG_COMPRESSION_LEVEL is not None:
            options['compress_level'] = self.context.config.PNG_COMPRESSION_LEVEL

        if options['quality'] is None:
            options['quality'] = self.context.config.QUALITY

        if self.icc_profile is not None:
            options['icc_profile'] = self.icc_profile

        if self.context.config.PRESERVE_EXIF_INFO:
            if self.exif is not None:
                options['exif'] = self.exif

        try:
            if ext == '.webp':
                if self.image.mode not in ['RGB', 'RGBA']:
                    if self.image.mode == 'P':
                        mode = 'RGBA'
                    else:
                        mode = 'RGBA' if self.image.mode[-1] == 'A' else 'RGB'
                    self.image = self.image.convert(mode)

            if ext in ['.png', '.gif'] and self.image.mode == 'CMYK':
                self.image = self.image.convert('RGBA')

            self.image.format = FORMATS.get(
                ext, FORMATS[self.get_default_extension()])
            self.image.save(img_buffer, self.image.format, **options)
        except IOError:
            logger.exception(
                'Could not save as improved image, consider increasing ImageFile.MAXBLOCK'
            )
            self.image.save(img_buffer, FORMATS[ext])

        results = img_buffer.getvalue()
        img_buffer.close()
        self.extension = ext
        return results
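
Stripped of the format-specific handling, the save path above boils down to rendering into a BytesIO and returning its bytes. A short sketch of that pattern, including the conditional 'progressive' key that the comment in read() warns about; the image, quality, and flag values are illustrative only.

from io import BytesIO

from PIL import Image

img = Image.new('RGB', (32, 32), 'white')
options = {'quality': 80}
progressive_jpeg = True  # stand-in for context.config.PROGRESSIVE_JPEG
if progressive_jpeg:
    # Add the key only when wanted, as the comment in read() advises.
    options['progressive'] = True

buf = BytesIO()
img.save(buf, 'JPEG', **options)
data = buf.getvalue()  # raw JPEG bytes, ready to return to the caller
buf.close()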