예제 #1
0
def email_admin(self, subject, text, sorted_self):
    """Build a PDF of attendee details and e-mail it to the site admin.

    Parameters:
        subject     -- e-mail subject line.
        text        -- plain-text e-mail body.
        sorted_self -- iterable of (heading, body) text pairs, one per attendee.

    The message is sent from and to EMAIL_HOST_USER; send failures are
    silently ignored (fail_silently=True).
    """
    site_name = SiteSetting.objects.all().first().site_name

    styleSheet = getSampleStyleSheet()

    # NOTE: an HttpResponse with a Content-Disposition header used to be
    # constructed here but was never returned or used — dead code, removed.

    string_buffer = StringIO()

    # Assemble the PDF "flowables": a title, then one heading + body
    # paragraph (plus a small spacer) per attendee entry.
    new_pdf = []
    header = Paragraph(site_name + " Attendee Details", styleSheet['Heading1'])
    new_pdf.append(header)

    for element in sorted_self:
        new_pdf.append(Paragraph(element[0], styleSheet['Heading3']))
        new_pdf.append(Paragraph(element[1], styleSheet['BodyText']))
        new_pdf.append(Spacer(1, 2))

    doc = SimpleDocTemplate(string_buffer)
    doc.build(new_pdf)
    pdf = string_buffer.getvalue()
    string_buffer.close()

    msg = EmailMultiAlternatives(subject, text, EMAIL_HOST_USER, [EMAIL_HOST_USER])
    msg.attach(self.first_name + self.last_name + site_name + ".pdf", pdf, "application/pdf")
    msg.send(fail_silently=True)
예제 #2
0
파일: srj.py 프로젝트: ox-it/humfrey
    def _iter(self, sparql_results_type, fields, bindings, boolean, triples):
        if sparql_results_type not in ('resultset', 'boolean'):
            raise TypeError("Unexpected results type: {0}".format(sparql_results_type))

        # We'll spool to a buffer, and only yield when it gets a bit big.
        buffer = StringIO()

        # Do these attribute lookups only once.
        json_dumps, json_dump, buffer_write = json.dumps, json.dump, buffer.write

        buffer_write('{\n')
        if sparql_results_type == 'boolean':
            buffer_write('  "head": {},\n')
            buffer_write('  "boolean": %s' % ('true' if boolean else 'false'))
        elif sparql_results_type == 'resultset':
            buffer_write('  "head": {\n')
            buffer_write('    "vars": [ %s ]\n' % ', '.join(json_dumps(field) for field in fields))
            buffer_write('  },\n')
            buffer_write('  "results": {\n')
            buffer_write('    "bindings": [\n')
            for i, binding in enumerate(bindings):
                buffer_write('      {' if i == 0 else ',\n      {')
                j = 0
                for field in fields:
                    value = binding.get(field)
                    if value is None:
                        continue
                    buffer_write(',\n        ' if j > 0 else '\n        ')
                    json_dump(field, buffer)
                    if isinstance(value, rdflib.URIRef):
                        buffer_write(': { "type": "uri"')
                    elif isinstance(value, rdflib.BNode):
                        buffer_write(': { "type": "bnode"')
                    elif value.datatype is not None:
                        buffer_write(': { "type": "typed-literal", "datatype": ')
                        json_dump(value.datatype, buffer)
                    elif value.language is not None:
                        buffer_write(': { "type": "literal", "xml:lang": ')
                        json_dump(value.language, buffer)
                    else:
                        buffer_write(': { "type": "literal"')
                    buffer_write(', "value": ')
                    json_dump(value, buffer)
                    buffer_write(' }')

                    j += 1

                buffer_write('\n      }')
            buffer_write('\n    ]')
            buffer_write('\n  }')


            if buffer.tell() > 65000: # Almost 64k
                yield buffer.getvalue()
                buffer.seek(0)
                buffer.truncate()

        buffer_write('\n}')
        yield buffer.getvalue()
        buffer.close()
예제 #3
0
파일: utils.py 프로젝트: foonnnnn/image
def scale(data, width, height, overlay=None):
    """Rescale the given image, optionally cropping it to make sure the result image has the specified width and height."""
    import Image as pil
    from cStringIO import StringIO

    source = StringIO(data)
    img = pil.open(source)

    # Work in RGBA so transparency survives the resize/overlay steps.
    if img.mode != "RGBA":
        img = img.convert("RGBA")

    # Fit inside the (width, height) box while preserving aspect ratio:
    # start from the full target width, then shrink if height overflows.
    src_w, src_h = img.size
    ratio = float(src_w) / float(src_h)
    new_w = width
    new_h = new_w / ratio
    if new_h > height:
        new_h = height
        new_w = new_h * ratio

    img = img.resize((int(new_w), int(new_h)), pil.ANTIALIAS)

    sink = StringIO()
    do_overlay(img, overlay)
    img.save(sink, 'PNG')
    sink.seek(0)
    result = sink.getvalue()
    source.close()
    sink.close()
    return result
예제 #4
0
def print_rep(uid):
    """Build a PDF user report and return it as a Flask download response.

    uid == 0 renders the full user list; any other uid renders the single
    matching user's name, e-mail and progress.
    """
    # DroidSans supplies glyphs the default PDF fonts lack.
    registerFont(TTFont('DroidSans', 'DroidSans.ttf'))

    pdf = StringIO()

    doc = SimpleDocTemplate(pdf, pagesize=A4)
    elements = []
    style = getSampleStyleSheet()
    style.add(ParagraphStyle(name='Header', alignment=TA_LEFT,
                             fontName='DroidSans',
                             fontSize=14, leading=16))
    style.add(ParagraphStyle(name='Left', alignment=TA_LEFT,
                             fontName='DroidSans',
                             fontSize=12))
    style.add(ParagraphStyle(name='Right', alignment=TA_RIGHT,
                             fontName='DroidSans',
                             fontSize=12))
    if uid == 0:
        elements.append(Paragraph(u'<u>Users List</u>', style['Header']))
        u = User.query.all()
        for i, o in enumerate(u):
            elements.append(Paragraph(u'%s. %s %s %s' % (i+1, o.name, o.email, o.progress), style['Left']))
    else:
        u = User.query.get(uid)
        elements.append(Paragraph(u'%s %s %s' % (u.name, u.email, u.progress), style['Header']))

    doc.build(elements)
    pdf_file = pdf.getvalue()
    pdf.close()
    response = make_response(pdf_file)

    # BUG FIX: the header used to read filename='pdf_user.pdf" (mismatched
    # quotes); use balanced double quotes per RFC 6266.
    response.headers['Content-Disposition'] = 'attachment; filename="pdf_user.pdf"'
    response.mimetype = 'application/pdf'
    return response
예제 #5
0
def get_build_info():
    """Returns a string containing the build info.

    The result (packager e-mail, latest commit, branch, build time) is
    computed once via git and cached in the module global __build_info__.
    """
    global __build_info__
    if __build_info__ is not None:
        # Already computed this process; reuse the cached string.
        return __build_info__

    build_info_buffer = StringIO()
    original_dir = os.getcwd()

    try:
        # We need to execute the git command in the source root.
        os.chdir(__source_root__)
        # Add in the e-mail address of the user building it.
        (_, packager_email) = run_command('git config user.email', exit_on_fail=True, command_name='git')
        print >>build_info_buffer, 'Packaged by: %s' % packager_email.strip()

        # Determine the last commit from the log.
        (_, commit_id) = run_command('git log --summary -1 | head -n 1 | cut -d \' \' -f 2',
                                     exit_on_fail=True, command_name='git')
        print >>build_info_buffer, 'Latest commit: %s' % commit_id.strip()

        # Include the branch just for safety sake.
        (_, branch) = run_command('git branch | cut -d \' \' -f 2', exit_on_fail=True, command_name='git')
        print >>build_info_buffer, 'From branch: %s' % branch.strip()

        # Add a timestamp.
        print >>build_info_buffer, 'Build time: %s' % strftime("%Y-%m-%d %H:%M:%S UTC", gmtime())

        __build_info__ = build_info_buffer.getvalue()
        return __build_info__
    finally:
        # Always restore the working directory, even if a git command failed.
        os.chdir(original_dir)

        if build_info_buffer is not None:
            build_info_buffer.close()
예제 #6
0
def rrset_to_text(m):
    """Render an rrset mapping *m* as dig-style commented text.

    Every key is optional; only the sections present in *m* are emitted.
    """
    out = StringIO()
    try:
        if 'bailiwick' in m:
            out.write(';;  bailiwick: %s\n' % m['bailiwick'])

        if 'count' in m:
            out.write(';;      count: %s\n' % locale.format('%d', m['count'], True))

        # First/last observation timestamps, passive DNS and zone-file based.
        if 'time_first' in m:
            out.write(';; first seen: %s\n' % sec_to_text(m['time_first']))
        if 'time_last' in m:
            out.write(';;  last seen: %s\n' % sec_to_text(m['time_last']))

        if 'zone_time_first' in m:
            out.write(';; first seen in zone file: %s\n' % sec_to_text(m['zone_time_first']))
        if 'zone_time_last' in m:
            out.write(';;  last seen in zone file: %s\n' % sec_to_text(m['zone_time_last']))

        # One record line per rdata value.
        for rdata in m.get('rdata', ()):
            out.write('%s IN %s %s\n' % (m['rrname'], m['rrtype'], rdata))

        return out.getvalue()
    finally:
        out.close()
예제 #7
0
class SFTPStorageFile(File):
    """Django File backed by a remote (SFTP) storage, buffered in memory.

    Content is fetched lazily on the first read() and pushed back to the
    storage on close() only if write() was called (the "dirty" flag).
    """

    def __init__(self, name, storage, mode):
        self._name = name
        self._storage = storage
        self._mode = mode
        self._is_dirty = False
        self.file = StringIO()
        self._is_read = False

    @property
    def size(self):
        # Query the backend once and cache the result on the instance.
        if not hasattr(self, '_size'):
            self._size = self._storage.size(self._name)
        return self._size

    def read(self, num_bytes=None):
        # First read pulls the whole remote file; subsequent reads continue
        # from the same underlying file object.
        if not self._is_read:
            self.file = self._storage._read(self._name)
            self._is_read = True

        return self.file.read(num_bytes)

    def write(self, content):
        # Writing replaces the buffer wholesale (no append semantics) and
        # marks the file dirty so close() uploads it.
        if 'w' not in self._mode:
            raise AttributeError("File was opened for read-only access.")
        self.file = StringIO(content)
        self._is_dirty = True
        self._is_read = True

    def close(self):
        # Flush dirty content back to the storage before closing the buffer.
        if self._is_dirty:
            self._storage._save(self._name, self.file.getvalue())
        self.file.close()
예제 #8
0
파일: helpers.py 프로젝트: jramosdc/JobBot
def convert(url, pages=None):
    """Download the PDF at *url* and return its extracted text (utf-8).

    pages -- optional list of zero-based page numbers to extract;
             None means every page.
    """
    assert isinstance(url, basestring)
    assert pages == None or isinstance(pages, list)

    rscmng = PDFResourceManager()
    retstr = StringIO()
    device = TextConverter(rscmng, retstr, codec='utf-8', laparams=LAParams())
    web_page = urllib2.urlopen(urllib2.Request(url))
    # pdfminer needs a seekable file object, so spool the download to memory.
    fp = StringIO(web_page.read())
    interpreter = PDFPageInterpreter(rscmng, device)

    pdf_pages = PDFPage.get_pages(
        fp,
        set(pages if pages != None else []),
        maxpages=0,
        password='',
        caching=True,
        check_extractable=True
    )

    # TextConverter accumulates each processed page's text into retstr.
    for page in pdf_pages:
        interpreter.process_page(page)

    # Grab the text before tearing everything down.
    result = retstr.getvalue()

    fp.close()
    web_page.close()
    device.close()
    retstr.close()

    return result
예제 #9
0
파일: dill.py 프로젝트: brstrat/dill
def _create_stringo(value, position, closed):
    f = StringIO()
    if closed: f.close()
    else:
       f.write(value)
       f.seek(position)
    return f
예제 #10
0
    def profile(self, request):
        """Start/stop the python profiler, returns profile results"""
        # The profiler instance is stashed in the instance __dict__ so
        # repeated start/stop requests operate on the same Profile object.
        profile = self.__dict__.get("_profile")
        if "start" in request.properties:
            if not profile:
                profile = self.__dict__["_profile"] = Profile()
            profile.enable()
            self._log(LOG_INFO, "Started python profiler")
            return (OK, None)
        # Any request other than "start" needs a running profiler.
        if not profile:
            raise BadRequestStatus("Profiler not started")
        if "stop" in request.properties:
            profile.create_stats()
            self._log(LOG_INFO, "Stopped python profiler")
            # Stats are rendered into an in-memory stream and returned as text.
            out = StringIO()
            stats = pstats.Stats(profile, stream=out)
            try:
                stop = request.properties["stop"]
                if stop == "kgrind":  # Generate kcachegrind output using pyprof2calltree
                    from pyprof2calltree import convert

                    convert(stats, out)
                elif stop == "visualize":  # Start kcachegrind using pyprof2calltree
                    from pyprof2calltree import visualize

                    visualize(stats)
                else:
                    stats.print_stats()  # Plain python profile stats
                return (OK, out.getvalue())
            finally:
                out.close()
        raise BadRequestStatus("Bad profile request %s" % (request))
예제 #11
0
 def test_draw_ascii(self):
     """Tree to Graph conversion, if networkx is available."""
     # Render the example tree twice (default and 120-column width) into an
     # in-memory handle; the test passes if drawing raises nothing.
     handle = StringIO()
     tree = Phylo.read(EX_APAF, 'phyloxml')
     Phylo.draw_ascii(tree, file=handle)
     Phylo.draw_ascii(tree, file=handle, column_width=120)
     handle.close()
예제 #12
0
파일: g15profile.py 프로젝트: FPar/gnome15
 def export(self, filename):
     """
     Save this profile in a format that may be transmitted to another
     computer (as a zip file). All references to external images (for icon and background)
     are made relative and added to the archive.

     Keyword arguments:
     filename    --    file to save copy to
     """
     # Work on a fresh copy so the in-memory profile object is untouched.
     profile_copy = get_profile(self.device, self.id)

     archive_file = zipfile.ZipFile(filename, "w", compression = zipfile.ZIP_DEFLATED)
     try:
         # Icon: copy the file into <id>.resources/ inside the archive and
         # rewrite the copy's reference to the relative path.
         if profile_copy.icon and os.path.exists(profile_copy.icon):
             base_path = "%s.resources/%s" % ( profile_copy.id, os.path.basename(profile_copy.icon) )
             archive_file.write(profile_copy.icon, base_path )
             profile_copy.icon = base_path

         # Background: same relocation as the icon.
         if profile_copy.background and os.path.exists(profile_copy.background):
             base_path = "%s.resources/%s" % ( profile_copy.id, os.path.basename(profile_copy.background) )
             archive_file.write(profile_copy.background, base_path)
             profile_copy.background = base_path

         # Profile data itself, serialized into <id>.macros inside the zip.
         profile_data = StringIO()
         try:
             profile_copy.save(profile_data)
             archive_file.writestr("%s.macros" % profile_copy.id, profile_data.getvalue())
         finally:
             profile_data.close()
     finally:
         archive_file.close()
예제 #13
0
    def do_POST(self):
        """Serve a POST request: accept the upload and reply with an HTML
        result page (requires successful authentication first)."""
        # BUG FIX: the docstring used to sit after the authenticate() guard,
        # where it was just a no-op string statement, not documentation.
        if not self.authenticate():
            return
        r, info, meta = self.deal_post_data()
        res = 'Success' if r else 'Failure'
        log.info("Upload {} {} by: {}".format(res, info, self.client_address))
        f = StringIO()
        ref = self.headers.get('referer', 'None')

        response = {'result': res,
                    'referer': ref,
                    'info': info}
        # BUG FIX: a stray '"' used to follow the back link in the template,
        # rendering as literal text in the page.
        result = """
        <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
        <html><title>Upload Result Page</title>
        <body><h2>Upload Result Page</h2>
        <hr>
        <strong>{result}:</strong>
        {info}
        <br><a href="{referer}">back</a>
        </body></html>
        """
        f.write(result.format(**response))
        # Content-Length is the buffered page size; rewind before streaming.
        length = f.tell()
        f.seek(0)
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.send_header("Content-Length", str(length))
        self.end_headers()
        self.copyfile(f, self.wfile)
        f.close()
예제 #14
0
def tricks_generate_yaml(args):
    """
    Subcommand to generate Yaml configuration for tricks named on the command
    line.

    :param args:
        Command line argument options.
    """
    python_paths = path_split(args.python_path)
    add_to_sys_path(python_paths)
    output = StringIO()

    # Concatenate the YAML snippet from every named trick class.
    for trick_path in args.trick_paths:
        TrickClass = load_class(trick_path)
        output.write(TrickClass.generate_yaml())

    content = output.getvalue()
    output.close()

    # Header: the python-path mapping plus the top-level tricks key.
    header = yaml.dump({CONFIG_KEY_PYTHON_PATH: python_paths})
    header += "%s:\n" % CONFIG_KEY_TRICKS
    if args.append_to_file is None:
        # Output to standard output.
        if not args.append_only:
            content = header + content
        sys.stdout.write(content)
    else:
        # Prepend the header only when creating the file from scratch.
        if not os.path.exists(args.append_to_file):
            content = header + content
        # NOTE(review): opened append-binary ('ab') but written str content —
        # fine on Python 2, would need encoding on Python 3. Also rebinds the
        # name `output` from the StringIO above.
        with open(args.append_to_file, 'ab') as output:
            output.write(content)
예제 #15
0
    def generate(self, storage, original, size, miniature):
        """Create *miniature* (a thumbnail of *original*) in *storage* and
        return its URL; fall back to the original's URL if the source image
        cannot be read.

        size -- mapping with 'w' and 'h' pixel entries.
        """
        try:
            image = Image.open(StringIO(storage.open(original).read()))
        except:
            # PIL raises a plethora of Exceptions if reading the image
            # is not possible. Since we cannot be sure what Exception will
            # happen, catch them all so the thumbnailer will never fail.
            return storage.url(original)

        # Remove any stale thumbnail before regenerating it.
        storage.delete(miniature)

        # defining the size
        w, h = int(size['w']), int(size['h'])

        format = image.format # Save format for the save() call later
        # thumbnail() resizes in place, preserving aspect ratio within w x h.
        image.thumbnail([w, h], Image.ANTIALIAS)
        buf = StringIO()
        if image.mode not in ('RGBA', 'RGB', 'L'):
            image = image.convert('RGBA')
        image.save(buf, format or 'jpeg', quality=100)
        raw_data = buf.getvalue()
        buf.close()
        storage.save(miniature, ContentFile(raw_data))

        return storage.url(miniature)
예제 #16
0
    def execute(self, server):
        """Replay the admin-console script self.name against the server.

        Lines are accumulated into commands using the current delimiter held
        by the State object; '--#' lines are control directives consumed by
        the State itself, plain '--' lines are comments echoed verbatim.
        """
        ts = State(self.suite_ini, server.admin, TarantoolServer)
        cmd = None

        def send_command(command):
            # Send to every open connection; only the first result is used.
            result = ts.curcon[0](command, silent=True)
            for conn in ts.curcon[1:]:
                conn(command, silent=True)
            return result

        for line in open(self.name, 'r'):
            if not cmd:
                cmd = StringIO()
            if line.find('--#') == 0:
                # Control directive: flush any pending command first.
                rescom = cmd.getvalue().replace('\n\n', '\n')
                if rescom:
                    sys.stdout.write(cmd.getvalue())
                    result = send_command(rescom)
                    sys.stdout.write(result.replace("\r\n", "\n"))
                sys.stdout.write(line)
                ts(line)
            elif line.find('--') == 0:
                # Plain comment: echo only, do not accumulate.
                sys.stdout.write(line)
            else:
                # Accumulate the line (skipping leading blank lines).
                if line.strip() or cmd.getvalue():
                    cmd.write(line)
                delim_len = -len(ts.delimiter) if len(ts.delimiter) else None
                # The command is complete once the line ends with the
                # delimiter and non-blank content precedes it.
                if line.endswith(ts.delimiter+'\n') and cmd.getvalue().strip()[:delim_len].strip():
                    sys.stdout.write(cmd.getvalue())
                    rescom = cmd.getvalue()[:delim_len].replace('\n\n', '\n')
                    result = send_command(rescom)
                    sys.stdout.write(result.replace("\r\n", "\n"))
                    cmd.close()
                    cmd = None
        ts.flush()
예제 #17
0
 def send_tryton_url(self, path):
     """Reply with an HTTP 300 pointing the client at the equivalent
     tryton:// URL for *path*, plus a small auto-refreshing HTML page."""
     self.send_response(300)
     # Build an IDNA (punycode) hostname so non-ASCII names survive the URL.
     hostname = CONFIG['hostname'] or unicode(socket.getfqdn(), 'utf8')
     hostname = '.'.join(encodings.idna.ToASCII(part) for part in
         hostname.split('.'))
     values = {
         'hostname': hostname,
         'path': path,
         }
     content = StringIO()
     # BUG FIX: the opening tag was written as '<html' (missing '>'),
     # producing invalid markup like '<html<head>'.
     content.write('<html>')
     content.write('<head>')
     content.write('<meta http-equiv="Refresh" '
         'content="0;url=tryton://%(hostname)s%(path)s"/>' % values)
     content.write('<title>Moved</title>')
     content.write('</head>')
     content.write('<body>')
     content.write('<h1>Moved</h1>')
     content.write('<p>This page has moved to '
         '<a href="tryton://%(hostname)s%(path)s">'
         'tryton://%(hostname)s%(path)s</a>.</p>' % values)
     content.write('</body>')
     content.write('</html>')
     # Content-Length is the buffered size; rewind before streaming.
     length = content.tell()
     content.seek(0)
     self.send_header('Location', 'tryton://%(hostname)s%(path)s' % values)
     self.send_header('Content-type', 'text/html')
     self.send_header('Content-Length', str(length))
     self.end_headers()
     self.copyfile(content, self.wfile)
     content.close()
예제 #18
0
파일: renderers.py 프로젝트: turkus/seth
    def __call__(self, value, system):
        """Pyramid renderer: turn *value* into PDF bytes via xhtml2pdf.

        value must contain either 'template' (rendered against the request)
        or 'html'; an optional 'file_name' overrides the generated filename.
        """
        from xhtml2pdf import pisa

        request = system['request']
        buff = StringIO()

        if not 'template' in value and not 'html' in value:
            raise SethRendererException(u"No template nor html provided")

        if not 'html' in value:
            try:
                html = self.render_template(value['template'], value, request)
            except ValueError:
                raise SethRendererException(u"Wrong renderer factory conf")
        else:
            html = value['html']

        # pisa reads UTF-8 encoded HTML from a stream and writes into buff.
        try:
            pdf = pisa.CreatePDF(
                StringIO(html.encode('utf-8')), buff, encoding='utf-8'
            )
        except AttributeError:
            raise SethRendererException(u"Error generating PDF file.")

        if pdf.err:
            raise SethRendererException(u"Error generating PDF file.")

        # Set content-type/disposition headers, then return the raw bytes.
        file_name = value.pop('file_name', self.get_filename(value))
        self.prepare_response(request, 'application/pdf', file_name, value)
        result = buff.getvalue()
        buff.close()
        return result
class ZipString:
    """An in-memory zip archive built on top of a StringIO buffer.

    Typical use: add() entries, getZipData() for the raw zip bytes, then
    close() to release the buffer. Adding after finalization raises.
    """
    def __init__(self):
        self.s = StringIO()
        self.zip = zipfile.ZipFile(self.s, "w", zipfile.ZIP_DEFLATED)
        self.isZipClosed = False
        self.isClosed = False

    def add(self, filename, data):
        # Writing into a finalized archive would corrupt it, so refuse.
        if self.isZipClosed:
            raise Exception("Cannot add to a closed ZipString")
        self.zip.writestr(filename, data)

    def close(self):
        """ closes the ZipString and release all data."""
        if self.isClosed==False:
            self.__closezip()
            self.s.close()
            self.isClosed = True

    def __closezip(self):
        # Finalize the archive at most once; mark entries rw for everyone
        # (the external_attr high bits carry the unix permission mode).
        for zinfo in self.zip.filelist:
            zinfo.external_attr = 0666 << 16L       # or 0777
        
        if self.isZipClosed==False:
            self.zip.close()
            self.isZipClosed = True

    def getZipData(self):
        # Finalize the archive, then return the complete zip file bytes.
        self.__closezip()
        if self.isClosed:
            raise Exception("Cannot getZipData when ZipString is closed!")
        return self.s.getvalue()
예제 #20
0
파일: converters.py 프로젝트: cirocco/Sylva
    def export(self):
        """Package the stored CSV query results as a zip archive.

        Returns (zip_data, zip_name): the raw zip bytes and a filename
        derived from the graph slug.
        """
        graph = self.graph
        csv_results = self.csv_results
        query_name = self.query_name
        # First row holds the column headers, the rest are data rows.
        headers = csv_results[0]
        results = csv_results[1:]

        zip_buffer = StringIO()

        with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as \
                zip_file:
            # The CSV lives inside the archive under query/<name>.csv.
            csv_name = os.path.join('query', query_name + '.csv')
            csv_buffer = StringIO()
            csv_writer = csv.writer(csv_buffer, delimiter=',',
                                    quotechar='"', quoting=csv.QUOTE_ALL)
            csv_header = headers
            csv_writer.writerow(csv_header)
            for result in results:
                csv_writer.writerow(result)
            zip_file.writestr(csv_name, csv_buffer.getvalue())
            csv_buffer.close()

        zip_data = zip_buffer.getvalue()
        zip_buffer.close()
        zip_name = graph.slug + '.zip'

        return zip_data, zip_name
예제 #21
0
파일: F9.py 프로젝트: gisce/libCNMC
 def writer(self):
     """Consumer loop: drain self.output_q, writing each item either to the
     file self.file_output (binary) or into an in-memory buffer published
     as self.content.

     The loop stops on the sentinel string 'STOP'; an 'END' marker is
     always appended to the output.
     """
     if self.file_output:
         fio = open(self.file_output, 'wb')
     else:
         fio = StringIO()
     while True:
         try:
             item = self.output_q.get()
             if item == 'STOP':
                 break
             # Encode any unicode parts of the item tuple as UTF-8
             # (Python 2 idiom; non-unicode parts pass through).
             msg = map(
                 lambda x: type(x) == unicode and x.encode('utf-8') or x,
                 item
             )
             fio.write(str(msg[0])+'\n')
         except Exception:
             # Never let a bad item kill the writer; report and move on.
             traceback.print_exc()
             if self.raven:
                 self.raven.captureException()
         finally:
             self.output_q.task_done()
     fio.write('END')
     if not self.file_output:
         self.content = fio.getvalue()
     fio.close()
예제 #22
0
파일: plugin.py 프로젝트: fbesser/Limnoria
    def install(self, plugin):
        """Download self._downloadUrl (a .tar.gz) and install *plugin* from
        it into the first writable plugin directory."""
        directories = conf.supybot.directories.plugins()
        directory = self._getWritableDirectoryFromList(directories)
        assert directory is not None
        dirname = ''.join((self._path, plugin))

        # Spool the download into memory so tarfile gets a seekable object.
        fileObject = urllib2.urlopen(self._downloadUrl)
        fileObject2 = StringIO()
        fileObject2.write(fileObject.read())
        fileObject.close()
        fileObject2.seek(0)
        archive = tarfile.open(fileobj=fileObject2, mode='r:gz')
        # All members of the archive share one top-level prefix directory.
        prefix = archive.getnames()[0]
        try:
            assert archive.getmember(prefix + dirname).isdir()

            for file in archive.getmembers():
                if file.name.startswith(prefix + dirname):
                    extractedFile = archive.extractfile(file)
                    # Strip the archive prefix and self._path to build the
                    # destination path under the plugin directory.
                    newFileName = os.path.join(*file.name.split('/')[1:])
                    newFileName = newFileName[len(self._path)-1:]
                    newFileName = os.path.join(directory, newFileName)
                    if os.path.exists(newFileName):
                        # Replace any previous installation wholesale.
                        assert os.path.isdir(newFileName)
                        shutil.rmtree(newFileName)
                    if extractedFile is None:
                        # Directory member: just create it.
                        os.mkdir(newFileName)
                    else:
                        # NOTE(review): this file handle is never explicitly
                        # closed (relies on refcounting).
                        open(newFileName, 'a').write(extractedFile.read())
        finally:
            archive.close()
            fileObject2.close()
            del archive, fileObject, fileObject2
예제 #23
0
파일: kernel.py 프로젝트: jcsp/teuthology
def need_to_install_distro(ctx, role):
    """
    Installing kernels on rpm won't setup grub/boot into them.
    This installs the newest kernel package and checks its version
    and compares against current (uname -r) and returns true if newest != current.
    Similar check for deb.
    """
    (role_remote,) = ctx.cluster.only(role).remotes.keys()
    system_type = teuthology.get_system_type(role_remote)
    output, err_mess = StringIO(), StringIO()
    role_remote.run(args=['uname', '-r' ], stdout=output, stderr=err_mess )
    current = output.getvalue().strip()
    if system_type == 'rpm':
        role_remote.run(args=['sudo', 'yum', 'install', '-y', 'kernel' ], stdout=output, stderr=err_mess )
        # Reset the buffers so the version query below is parsed in isolation.
        output, err_mess = StringIO(), StringIO()
        role_remote.run(args=['rpm', '-q', 'kernel', '--last' ], stdout=output, stderr=err_mess )
        newest = output.getvalue().split()[0]

    if system_type == 'deb':
        distribution = teuthology.get_system_type(role_remote, distro=True)
        newest = get_version_from_pkg(role_remote, distribution)

    output.close()
    err_mess.close()
    if current in newest:
        return False
    # BUG FIX: log message used to read "Curent". NOTE(review): `newest` is
    # only bound on rpm/deb systems; any other system type raises NameError
    # at the comparison above — confirm those are the only supported types.
    log.info('Not newest distro kernel. Current: {cur} Expected: {new}'.format(cur=current, new=newest))
    return True
예제 #24
0
def crystalToDiffpyStructure(crystal):
    """Create a diffpy.Structure.Structure from a crystal.

    This requires diffpy.Structure to be installed. This uses file IO transfer
    data, so there is some inherent precision loss.

    Note that the resulting structure will be in P1 symmetry.

    """
    # Round-trip the crystal through CIF text held in an in-memory buffer.
    from cStringIO import StringIO

    cif_buffer = StringIO()
    crystal.CIFOutput(cif_buffer)

    from diffpy.Structure import Structure

    structure = Structure()
    cif_text = cif_buffer.getvalue()
    structure.readStr(cif_text)
    cif_buffer.close()
    return structure
예제 #25
0
 def download_file(self, **kargs):
     """Look up the com_file record kargs['file_id'] and stream its on-disk
     content back as an attachment download.

     On a missing record or unreadable file, returns a user-facing error
     string (kept verbatim in Chinese, as users see it).
     """
     file_id = kargs['file_id']
     Model = request.session.model('abc_ipt.com_file')
     files = Model.search_read([('id', '=', file_id)])
     if files:
         file = files[0]
     else:
         return '未发现该文件'
     tmp_file_name = file['tmp_file_name']
     file_name = file['file_name']
     # Resolve the stored file under the addon's com_files directory,
     # normalizing Windows-style path separators.
     path = os.path.abspath(os.path.dirname(sys.argv[0]))
     filepath = path.replace('\\', '/') + '/myaddons/abc_ipt/com_files/{}'.format(tmp_file_name)
     try:
         # Default open mode is 'r' (read-only text); 'rb' reads raw bytes.
         with open(filepath, 'rb') as f:
             contents = f.read()
     except Exception as e:
         _logger = logging.getLogger(__name__)
         _logger.error(str(e))
         return '读取文件失败'
     # Round-trip the bytes through a buffer before building the response.
     fo = StringIO()
     fo.write(contents)
     fo.seek(0)
     data = fo.read()
     fo.close()
     return request.make_response(data,
                      headers=[('Content-Disposition', content_disposition(file_name)),
                               ('Content-Type', 'application/octet-stream')],
                      )
예제 #26
0
파일: inout.py 프로젝트: pazur/final
 def write(self, seq):
     """Serialize *seq* with Bio.SeqIO in self.format and return the text."""
     buffer = StringIO()
     try:
         Bio.SeqIO.write(seq, buffer, self.format)
         return buffer.getvalue()
     finally:
         # Release the buffer whether or not serialization succeeded.
         buffer.close()
예제 #27
0
def filter():
    """Flask endpoint: fetch the image at form field 'img-url', convert it
    to grayscale, and return JSON with a base64 PNG plus display dimensions
    fitted to the WIDTH x HEIGHT box."""
    img_url = request.form.get('img-url')
    response = requests.get(img_url)
    img = Image.open(StringIO(response.content))
    img_gray = img.convert('L')
    # Compute display width/height that fit WIDTH x HEIGHT while preserving
    # aspect ratio (the pixel data itself is not resized).
    w, h = img_gray.size
    aspect = 1.0*w/h
    if aspect > 1.0*WIDTH/HEIGHT:
        width = min(w, WIDTH)
        height = width/aspect
    else:
        height = min(h, HEIGHT)
        width = height*aspect
    # Re-encode as PNG and base64 it for embedding in the JSON payload.
    io = StringIO()
    img_gray.save(io, format="png")
    contents = io.getvalue()
    io.close()
    img_str = base64.b64encode(contents)

    image = {
        'width': int(width),
        'height': int(height),
        'data': img_str
    }
    return jsonify(image)
예제 #28
0
파일: kernel.py 프로젝트: jcsp/teuthology
def get_version_from_pkg(remote, ostype):
    """
    Round-about way to get the newest kernel uname -r compliant version string
    from the virtual package which is the newest kernel for debian/ubuntu.
    """
    output, err_mess = StringIO(), StringIO()
    newest=''
    #Depend of virtual package has uname -r output in package name. Grab that.
    if 'debian' in ostype:
        remote.run(args=['sudo', 'apt-get', '-y', 'install', 'linux-image-amd64' ], stdout=output, stderr=err_mess )
        remote.run(args=['dpkg', '-s', 'linux-image-amd64' ], stdout=output, stderr=err_mess )
        # NOTE(review): `output` accumulates both commands' stdout; the parse
        # relies on a 'Depends:' line appearing in that combined text.
        for line in output.getvalue().split('\n'):
            if 'Depends:' in line:
                newest = line.split('linux-image-')[1]
                output.close()
                err_mess.close()
                return newest
    #Ubuntu is a depend in a depend.
    if 'ubuntu' in ostype:
        remote.run(args=['sudo', 'apt-get', '-y', 'install', 'linux-image-current-generic' ], stdout=output, stderr=err_mess )
        remote.run(args=['dpkg', '-s', 'linux-image-current-generic' ], stdout=output, stderr=err_mess )
        # NOTE(review): `depends` stays unbound if no 'Depends:' line is
        # found, making the next run() call raise NameError — confirm the
        # dpkg output always contains one.
        for line in output.getvalue().split('\n'):
            if 'Depends:' in line:
                depends = line.split('Depends: ')[1]
        remote.run(args=['dpkg', '-s', depends ], stdout=output, stderr=err_mess )
        for line in output.getvalue().split('\n'):
            if 'Depends:' in line:
                # Take only the first package of a comma-separated list.
                newest = line.split('linux-image-')[1]
                if ',' in newest:
                    newest = newest.split(',')[0]
    output.close()
    err_mess.close()
    return newest
예제 #29
0
파일: chunks.py 프로젝트: altoplano/qfs
    def printServerListToHTML(self, buffer):
        txtStream = StringIO()
        theLen = len(self.selectedHeaders)
        totalValue = [[0,0] for i in range(theLen)]
#beginning of table body
        print >> buffer, '''
     <tbody>'''
#table body
        for i in xrange(len(self.data1.chunkServerData.chunkServers)):
            if i%2 == 1:
                trclass = "class=odd"
            else: 
                trclass = ""    
            self.printServerToHTML(txtStream, i, trclass,totalValue)

#total value
        print >> buffer, '''
        <tr class="totalCls">''',
        for val in totalValue:
            if val[1] == 0:
                s="n/a"
            else:
                s= "%.2e" % (val[0])
            print >> buffer, '''
            <td align="center">%s</td>''' % (s),
        print >> buffer,'''</tr>'''
        
#all other values
        print >> buffer, txtStream.getvalue()
        txtStream.close()

#end of table body & table
        print >> buffer, '''
예제 #30
0
def get_pdf_text(path):
    """ Reads a pdf file and returns a dict of the text where the
        index represents the page number.
        http://stackoverflow.com/a/20905381
    """
    rsrcmgr = PDFResourceManager()
    retstr = StringIO()
    # change to utf-8 if the text comes out garbled
    codec = 'ascii'
    #codec = 'utf-8'
    laparams = LAParams()
    pages = {}
    # TextConverter fills the *pages* dict as a side effect of processing.
    device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams, showpageno=True, pages=pages)
    fp = file(path, 'rb')
    interpreter = PDFPageInterpreter(rsrcmgr, device)
    password = ""
    maxpages = 0
    caching = True
    pagenos=set()
    # An empty pagenos set means "process every page".
    for page in PDFPage.get_pages(fp, pagenos, maxpages=maxpages, password=password, caching=caching, check_extractable=True):
        interpreter.process_page(page)
    fp.close()
    device.close()
    retstr.close()
    return pages
예제 #31
0
파일: xml.py 프로젝트: g91/Bfbc2Edit
def toxml(filename):
    """Convert a '{binary}' .dbx file to an XML file written next to it.

    Returns silently when `filename` is not a .dbx file or lacks the
    8-byte "{binary}" magic header.  Output uses Windows line endings.

    NOTE(review): relies on names defined elsewhere in this file:
    read128() (presumably reads a variable-length int from the global
    stream `f`), intfloat(), XMLHEADER and TABLEN -- confirm.
    """
    if filename[-4:] != ".dbx": return
    fi = open(filename, "rb")
    if fi.read(8) != "{binary}":
        fi.close()
        return
    print filename
    global f
    f = StringIO(fi.read())  # dump the file in memory
    fi.close()
    out = open(filename[:-3] + "xml", "wb")
    out.write(XMLHEADER)  ###
    # offset to PAYLOAD; relative offset is 24 less
    totoffset, zero, reloffset, numofstrings = unpack(">IIII", f.read(16))
    stringoffsets = unpack(">" + "I" * numofstrings, f.read(4 * numofstrings))
    #calculate the length of the strings and grab them
    lengths = [
        stringoffsets[i + 1] - stringoffsets[i]
        for i in xrange(numofstrings - 1)
    ]
    lengths.append(reloffset - 4 * numofstrings - stringoffsets[-1])
    # [:-1] drops the trailing byte of each string (presumably a NUL terminator)
    strings = [f.read(l)[:-1] for l in lengths]

    # do payload
    tablevel = 0
    opentags = [
    ]  # need the prefixes to close the tag, e.g. <array> -> </array>
    try:
        while 1:
            # example entry: 4ca30490014b28506f00 -> 4c(prefix) a(type) 3(numofattribs) 04 9001 4b 28 50 6f (3 attribs with 2 parts each) 00 (null)
            # -> <strings[4c] strings[04]="strings[9001]" strings[4b]="strings[28]" strings[50]="strings[6f]">
            prefixnumber = read128()
            # a zero prefix closes the innermost open tag
            if prefixnumber == 0:
                tablevel -= 1
                out.write(tablevel * TABLEN + "</" + opentags.pop() + ">\r\n")
                continue
            prefix = strings[prefixnumber]
            # one byte -> two hex digits: high nibble = element type,
            # low nibble = attribute count
            typ, numofattrib = hexlify(f.read(1))
            numofattrib = int(numofattrib)

            attribs = [[strings[read128()], strings[read128()]]
                       for i in xrange(numofattrib)]
            if numofattrib:
                tag = tablevel * TABLEN + "<" + prefix + " " + " ".join(
                    [attrib[0] + '="' + attrib[1] + '"' for attrib in attribs])
            else:
                tag = tablevel * TABLEN + "<" + prefix

            if typ == "a":  # contains other elements
                f.seek(1, 1)  #null
                tablevel += 1
                opentags.append(prefix)
                out.write(tag + ">\r\n")

            elif typ == "2":
                # string content, referenced by index into the string table
                content = strings[read128()]
                if content:
                    out.write(tag + ">" + content + "</" + prefix + ">\r\n")
                else:
                    out.write(
                        tag + " />\r\n"
                    )  # close bracket immediately for \x00 (empty) content

            elif typ == "7":
                # numeric array: count followed by per-element byte width
                numofnums, numlength = read128(), read128()
                if numlength == 4:
                    # need to go through every single number and evaluate whether int or float
                    if numofnums % 4 == 0 and numofnums:
                        contentlist = [None] * numofnums
                        for i in xrange(numofnums):
                            rawnum = f.read(4)
                            # every 4th value gets special zero/sentinel handling
                            if i % 4 == 3:
                                hexnum = hexlify(rawnum)
                                if hexnum == "00000000":
                                    contentlist[i] = ("*zero*")
                                elif hexnum == "cdcdcdcd":
                                    contentlist[i] = ("*nonzero*")
                                else:
                                    contentlist[i] = (intfloat(
                                        rawnum, attribs[0][1]))
                            else:
                                contentlist[i] = (intfloat(
                                    rawnum, attribs[0][1]))
                        content = "/".join(contentlist)
                    else:
                        content = "/".join([
                            intfloat(f.read(4), attribs[0][1])
                            for x in xrange(numofnums)
                        ])

                elif numlength == 8:
                    # 8-byte elements are big-endian doubles (backtick = repr)
                    content = "/".join([
                        ` x `
                        for x in unpack(">" +
                                        "d" * numofnums, f.read(8 * numofnums))
                    ])
                else:
                    # any other width: treat as big-endian unsigned shorts
                    content = "/".join([
                        ` x `
                        for x in unpack(">" +
                                        "H" * numofnums, f.read(2 * numofnums))
                    ])
                out.write(tag + ">" + content + "</" + prefix + ">\r\n")

            else:  #typ 6
                f.seek(1, 1)  #\x01
                bol = f.read(1)
                if bol == "\x01": content = "true"
                elif bol == "\x00": content = "false"
                else:
                    content = str(
                        ord(bol)
                    )  # <field name="ChannelCount"> is stored here with values 2,4,6. Needs some testing.
                out.write(tag + ">" + content + "</" + prefix + ">\r\n")
    except:
        # NOTE(review): the bare except doubles as the end-of-stream loop
        # exit (read128/unpack raise once the payload is exhausted), but it
        # also silently hides genuine parse errors.
        f.close()
        out.close()
예제 #32
0
    def print_budget(self, cr, uid, ids, context=None):
        """Build the "Situation Budgetaire" Excel report for the budgets in
        context['active_ids'] and open it in a download wizard.

        Follows the legacy OpenERP osv API (cr, uid, ids, context).  One
        worksheet is produced; each budget gets a header block plus one
        table row per budget line (planned / commitment / available /
        actual / theoretical amounts and ratios).

        Returns an ir.actions.act_window dict opening a
        report.excel.wizard record holding the base64-encoded .xlsx.

        Raises osv.except_osv when a budget line's general budget has no
        accounts.

        NOTE(review): ft1, ft3, al1, fl2, fl3 and b2 are presumably
        module-level openpyxl style objects defined elsewhere in this
        file -- confirm.
        """

        # variable declarations
        i = 6  # i = row offset from the first row of the Excel sheet
        j = 1  # j = column offset from the first cell of the Excel sheet
        cell_tab = 9  # total cells of the SB table (unused below)
        row_tab = 8  # total rows of the SB table (unused below)
        k = 0
        r = 0  # (unused below)
        c = 0  # (unused below)
        buf = StringIO()
        date_day = datetime.now().strftime('%d-%m-%Y')
        wizard = self.browse(
            cr, uid, ids)  # wizard record supplying date_budget_start/_end
        budget_obj = self.pool.get('crossovered.budget')
        move_store_obj = self.pool.get('move.store')
        account_obj = self.pool.get('account.account')
        aal_obj = self.pool.get('account.analytic.line')
        journal_obj = self.pool.get('account.analytic.journal')
        budgets = budget_obj.browse(cr, uid, context['active_ids'])
        header_table = [
            'Ligne budgétaire', 'Montant Prévu', 'Montant Engagement',
            'Montant Disponible', 'Montant Réel', 'Montant Théorique',
            'Pourcentage', 'E/B', 'R/B'
        ]

        # load the report configuration
        # default values
        config_report_id = self.pool.get('config.table.xls').search(
            cr, uid, [('code', '=', 'SB')])
        complete_name = "Situation Budgetaire"
        title_feuille_report = "Situation Budgetaire"
        title_report = "Situation Budgetaire"
        name_report_out = "Situation Budgetaire"
        color_entete_table = "FFDDDDDD"
        if config_report_id:
            config_report = self.pool.get('config.table.xls').browse(
                cr, uid, config_report_id)
            if config_report.title_report:
                title_report = config_report.title_report
            if config_report.title_feuille_report:
                title_feuille_report = config_report.title_feuille_report
            if config_report.name_report_out:
                name_report_out = config_report.name_report_out
            if config_report.row_start != 0:
                i = config_report.row_start
            if config_report.column_start != 0:
                j = config_report.column_start
            if config_report.couleur_tableau_header:
                # "#RRGGBB" -> "FFRRGGBB" (openpyxl ARGB color)
                color_entete_table = config_report.couleur_tableau_header.replace(
                    "#", "FF")
        fl1 = PatternFill(fill_type='solid',
                          start_color=color_entete_table,
                          end_color=color_entete_table)

        # create the Excel workbook wb
        wb = Workbook(guess_types=True)
        ws = wb.active
        ws.title = title_feuille_report

        # fill the sheet with the data
        # write the report's main title
        title1 = title_report
        ce = ws.cell(row=1, column=j, value=title1)
        ce.font = ft1
        ce.alignment = al1
        ce.fill = fl2
        ce.border = b2
        ce1 = ws.cell(row=1, column=j)
        ce1 = ws.merge_cells(start_row=1,
                             start_column=j,
                             end_row=1,
                             end_column=j + 4)

        # loop over the selected budgets
        for budget in budgets:
            # write the budget's header information
            # name
            name_budget = "Budget :  " + budget.name
            ce = ws.cell(row=i - 3, column=j, value=name_budget)
            ce.font = ft3
            ce.fill = fl3
            ce.border = b2
            # code
            code_budget = "Code :  " + budget.code
            ce = ws.cell(row=i - 3, column=j + 1, value=code_budget)
            ce.font = ft3
            ce.fill = fl3
            ce.border = b2
            # duration
            date_budget = "Durée :  " + wizard.date_budget_start + " - " + wizard.date_budget_end
            ce = ws.cell(row=i - 3, column=j + 2, value=date_budget)
            ce.font = ft3
            ce.fill = fl3
            ce.border = b2
            ce1 = ws.cell(row=i - 3, column=j + 2)
            ce1 = ws.merge_cells(start_row=i - 3,
                                 start_column=j + 2,
                                 end_row=i - 3,
                                 end_column=j + 3)
            # print date
            date_print = "Imprimé :  " + date_day
            ce = ws.cell(row=i - 3, column=j + 4, value=date_print)
            ce.font = ft3
            ce.fill = fl3
            ce.border = b2

            # write the table header row
            k = 0
            for header in header_table:
                ce = ws.cell(row=i, column=j + k, value=header)
                ce.fill = fl1
                ce.font = ft3
                ce.alignment = al1
                ce.border = b2
                ws.column_dimensions[ce.column].width = 26.0
                k += 1
            k = 1
            if budget:
                for bl in budget.crossovered_budget_line:
                    # Label
                    ce = ws.cell(row=i + k, column=j, value=bl.name)
                    ce.border = b2
                    # Budgetary position
                    # ce=ws.cell(row=i+k,column=j,value=bl.general_budget_id.name)
                    # ce.border=b2
                    # Planned amount
                    ce = ws.cell(row=i + k,
                                 column=j + 1,
                                 value=bl.planned_amount)
                    ce.border = b2
                    # Commitment amount
                    acc_ids = [x.id for x in bl.general_budget_id.account_ids]
                    if not acc_ids:
                        raise osv.except_osv(
                            _('Error!'),
                            _("The Budget '%s' has no accounts!") %
                            str(bl.general_budget_id.name))
                    acc_ids = account_obj._get_children_and_consol(
                        cr, uid, acc_ids, context=context)
                    # sum of analytic lines booked on commitment journals
                    sql_string = "SELECT SUM(al.amount) " \
                                 "FROM account_analytic_line al " \
                                 "LEFT JOIN account_analytic_journal aj ON al.journal_id = aj.id " \
                                 "WHERE al.budget_line_id = %s " \
                                 "AND (al.date between to_date(%s,'yyyy-mm-dd') AND to_date(%s,'yyyy-mm-dd')) " \
                                 "AND al.general_account_id=ANY(%s) AND aj.commitment_journal is true"
                    sql_args = (bl.id, wizard.date_budget_start,
                                wizard.date_budget_end, acc_ids)
                    cr.execute(sql_string, sql_args)
                    result = cr.fetchone()[0]
                    if not result:
                        result = 0.0
                    ce = ws.cell(row=i + k, column=j + 2, value=abs(result))
                    ce.border = b2
                    # Available amount: available_amount
                    available_amount = bl.planned_amount - bl.commitment_amount
                    ce = ws.cell(row=i + k,
                                 column=j + 3,
                                 value=available_amount)
                    ce.border = b2
                    # Actual amount: practical_amount
                    ## start processing
                    histo = False
                    practical_amount = 0
                    result_final = 0
                    result_partial = 0
                    # Check whether any historised move lines are linked to the budget lines
                    for aa in bl.analytic_account_id:
                        for aal in aa.line_ids:
                            if aal.move_id:
                                move_store_id = move_store_obj.search(
                                    cr, uid,
                                    [('move_line_id', '=', aal.move_id.id),
                                     ('create_date', '>=',
                                      wizard.date_budget_end)])
                                if move_store_id:
                                    histo = True
                    if histo == False:
                        practical_amount = bl.practical_amount
                    else:
                        # get purchase journal
                        journal_analytic_ids = journal_obj.search(
                            cr, uid, [('type', '=', 'purchase')])
                        # (only referenced by the commented-out SQL below)
                        journal_analytic = journal_obj.browse(
                            cr, uid, journal_analytic_ids).id
                        # get general budget
                        acc_ids = [
                            x.id for x in bl.general_budget_id.account_ids
                        ]
                        if not acc_ids:
                            raise osv.except_osv(
                                _('Error!'),
                                _("The Budget '%s' has no accounts!") %
                                str(bl.general_budget_id.name))
                        acc_ids = account_obj._get_children_and_consol(
                            cr, uid, acc_ids, context=context)
                        for aa in bl.analytic_account_id:
                            result_partial = 0
                            aal_ids = aal_obj.search(
                                cr, uid, [('account_id', '=', aa.id)])
                            if aal_ids:
                                aals = aal_obj.browse(cr, uid, aal_ids)
                                for aal in aals:
                                    move_store_id = move_store_obj.search(
                                        cr, uid,
                                        [('move_line_id', '=', aal.move_id.id),
                                         ('create_date', '>=',
                                          wizard.date_budget_end)])
                                    if move_store_id:  # TODO: also check the wizard date against the move store date
                                        move_store = move_store_obj.browse(
                                            cr, uid, move_store_id)[0]
                                        if move_store.debit:
                                            result_partial += move_store.debit
                                    else:
                                        result_partial += aal.amount
                                        # cr.execute("SELECT amount FROM account_analytic_line WHERE id=%s AND account_id=%s AND journal_id = %s AND (date "
                                        #     "between to_date(%s,'yyyy-mm-dd') AND to_date(%s,'yyyy-mm-dd')) AND "
                                        #     "general_account_id=ANY(%s)", (aal.id,aa.id,journal_analytic, bl.date_from, bl.date_to,acc_ids,))
                                        # result = cr.fetchone()[0]
                                        #if result != None:
                                        #    result_partial += result
                                result_final += result_partial

                    acc_ids = [x.id for x in bl.general_budget_id.account_ids]
                    if not acc_ids:
                        raise osv.except_osv(
                            _('Error!'),
                            _("The Budget '%s' has no accounts!") %
                            str(bl.general_budget_id.name))
                    acc_ids = account_obj._get_children_and_consol(
                        cr, uid, acc_ids, context=context)
                    # sum of analytic lines NOT booked on commitment journals
                    sql_string = "SELECT SUM(al.amount) " \
                                 "FROM account_analytic_line al " \
                                 "LEFT JOIN account_analytic_journal aj ON al.journal_id = aj.id " \
                                 "WHERE al.budget_line_id = %s " \
                                 "AND (al.date between to_date(%s,'yyyy-mm-dd') AND to_date(%s,'yyyy-mm-dd')) " \
                                 "AND al.general_account_id=ANY(%s) AND aj.commitment_journal is not true"
                    sql_args = (bl.id, wizard.date_budget_start,
                                wizard.date_budget_end, acc_ids)
                    cr.execute(sql_string, sql_args)
                    result = cr.fetchone()[0]
                    if not result:
                        result = 0.0
                    ce = ws.cell(row=i + k, column=j + 4, value=abs(result))
                    ce.border = b2
                    ## end processing
                    # Theoretical amount: theoritical_amount
                    # planned amount prorated by elapsed days of the budget period
                    from_date = datetime.strptime(budget.date_from, '%Y-%m-%d')
                    to_date = datetime.strptime(budget.date_to, '%Y-%m-%d')
                    todays = datetime.strptime(
                        datetime.now().strftime('%Y-%m-%d'), '%Y-%m-%d')
                    theoritical_amount = (bl.planned_amount /
                                          ((to_date - from_date).days + 1)) * (
                                              (todays - from_date).days + 1)
                    ce = ws.cell(row=i + k,
                                 column=j + 5,
                                 value=theoritical_amount)
                    ce.border = b2
                    # Percentage: percentage
                    # percentage=(practical_amount/theoritical_amount)*100
                    ce = ws.cell(row=i + k, column=j + 6, value=bl.percentage)
                    ce.border = b2
                    # E/B (commitment / planned, in %)
                    if bl.planned_amount != 0:
                        e_b = (bl.commitment_amount / bl.planned_amount) * 100
                    else:
                        e_b = 0
                    ce = ws.cell(row=i + k, column=j + 7, value=e_b)
                    ce.border = b2
                    # R/B (actual / planned, in %)
                    if bl.planned_amount != 0:
                        r_b = (bl.practical_amount / bl.planned_amount) * 100
                    else:
                        r_b = 0
                    ce = ws.cell(row=i + k, column=j + 8, value=r_b)
                    ce.border = b2
                    k += 1
            # leave a gap before the next budget's block
            i += k + 7

        # save and prepare the Excel file
        wb.save(buf)
        fichier = name_report_out + "_" + str(date_day) + ".xlsx"
        out = base64.encodestring(buf.getvalue())
        buf.close()
        vals = {'data': out, 'name_file': fichier}
        wizard_id = self.pool.get("report.excel.wizard").create(
            cr, uid, vals, context=context)
        return {
            # NOTE(review): missing space/separator -- this renders as
            # "Rapport ExcelSituation Budgetaire"
            'name': _("Rapport Excel" + complete_name),
            'view_mode': 'form',
            'view_id': False,
            'view_type': 'form',
            'res_model': 'report.excel.wizard',
            'res_id': wizard_id,
            'type': 'ir.actions.act_window',
            'target': 'new',
            'domain': '[]',
        }
예제 #33
0
    def pdf(self):
        """ Generates the PDF for this invoice and returns its file path.

        Draws the invoice data on an in-memory A4 canvas, merges it onto
        the letterhead paper and writes the result to
        STATIC_ROOT/invoices/<full_invoice_no>.pdf.

        Returns:
            Path of the written PDF file.

        Raises:
            IOError: if the letterhead paper PDF does not exist.
        """

        # Test if we're already stored. Programmer error if this invoice is not in the database yet.
        assert self.id

        # FIXME This function only works for periodic invoices at this time

        # Verify the watermark PDF exists or bail
        # NOTE(review): this check reads config.letterhead_paper_path while the
        # merge step below opens settings.letterhead_paper_path -- confirm both
        # names point at the same file, else the check is useless.
        if not exists(config.letterhead_paper_path):
            raise IOError(errno.ENOENT)

        # Build the invoice content in memory
        # FIXME Make page size configurable
        pdf_buffer = StringIO()
        canvas = Canvas(pdf_buffer, pagesize=A4)
        pdfmetrics.registerFont(TTFont('FreeSerif', 'fonts' + os.sep + 'FreeSerif.ttf'))
        pdfmetrics.registerFont(TTFont('FreeSerifB', 'fonts' + os.sep + 'FreeSerifBold.ttf'))
        pdfmetrics.registerFont(TTFont('FreeSerifI', 'fonts' + os.sep + 'FreeSerifItalic.ttf'))
        pdfmetrics.registerFont(TTFont('FreeSerifBI', 'fonts' + os.sep + 'FreeSerifBoldItalic.ttf'))

        # Draw the address
        # FIXME the invoice contact should be added
        # NOTE(review): invoice_name and the contact displayname are drawn at
        # the same coordinates and overlap -- one of them probably belongs on
        # its own line.
        canvas.setFont("FreeSerif", 12)
        canvas.drawString(40 * mm, A4[1] - (60 * mm), self.customer.invoice_name)
        canvas.drawString(40 * mm, A4[1] - (60 * mm), self.customer.invoice_contact.displayname)
        canvas.drawString(40 * mm, A4[1] - (65 * mm), self.customer.postal_address)
        canvas.drawString(40 * mm, A4[1] - (70 * mm), self.customer.postal_code_zip + " " + self.customer.postal_city)
        canvas.drawString(40 * mm, A4[1] - (75 * mm), self.customer.postal_country)

        # Draw the invoice information
        # FIXME 1. Need locale support for invoices
        # FIXME 2. Need customer reference and order numbers for the subscriptions
        # FIXME 3. The currency is still hardcoded
        canvas.drawString(10 * mm, A4[1] - (100 * mm), 'Invoice number:')
        canvas.drawString(50 * mm, A4[1] - (100 * mm), self.full_invoice_no)
        canvas.drawString(110 * mm, A4[1] - (100 * mm), 'Order number:')
        canvas.drawString(140 * mm, A4[1] - (100 * mm), '-')
        canvas.drawString(10 * mm, A4[1] - (105 * mm), 'Customer reference:')
        canvas.drawString(50 * mm, A4[1] - (105 * mm), '-')
        canvas.drawString(110 * mm, A4[1] - (105 * mm), 'ISO-4217 currency:')
        canvas.drawString(140 * mm, A4[1] - (105 * mm), 'EUR')

        # Draw the invoice data header
        canvas.setFont("FreeSerifB", 12)
        canvas.drawString(10 * mm, A4[1] - (115 * mm), 'Product')
        canvas.drawString(50 * mm, A4[1] - (115 * mm), 'Period')
        canvas.drawString(110 * mm, A4[1] - (115 * mm), 'Extra information')
        canvas.drawString(175 * mm, A4[1] - (115 * mm), 'Price')

        total_amount = 0
        total_vat = 0

        # One line per invoice item; y advances 5 mm per row
        y = 120
        for item in self.items.all():
            canvas.setFont("FreeSerif", 12)
            canvas.drawString(10 * mm, A4[1] - (y * mm), item.product.name)
            canvas.drawString(50 * mm, A4[1] - (y * mm), item.period)
            canvas.drawString(110 * mm, A4[1] - (y * mm), item.description)
            # FIXME Need variable currency
            canvas.setFont("FreeSerif", 10)
            canvas.drawString(175 * mm, A4[1] - (y * mm), '€')
            canvas.setFont("Courier", 10)
            canvas.drawString(178 * mm, A4[1] - (y * mm), "%10.2f" % item.amount)
            y += 5
            total_amount += item.amount
            total_vat += item.amount * item.vat.percent

        # Totals block: Subtotal, VAT, Total
        canvas.drawString(175 * mm, A4[1] - ((y-4) * mm), "____________")
        canvas.setFont("FreeSerif", 12)
        canvas.drawString(150 * mm, A4[1] - (y * mm), 'Subtotal')
        canvas.setFont("FreeSerif", 10)
        canvas.drawString(175 * mm, A4[1] - (y * mm), '€')
        canvas.setFont("Courier", 10)
        canvas.drawString(178 * mm, A4[1] - (y * mm), "%10.2f" % total_amount)
        y += 5
        canvas.setFont("FreeSerif", 12)
        canvas.drawString(150 * mm, A4[1] - (y * mm), 'VAT')
        canvas.setFont("FreeSerif", 10)
        canvas.drawString(175 * mm, A4[1] - (y * mm), '€')
        canvas.setFont("Courier", 10)
        canvas.drawString(178 * mm, A4[1] - (y * mm), "%10.2f" % total_vat)
        y += 5
        canvas.setFont("FreeSerif", 12)
        canvas.drawString(150 * mm, A4[1] - (y * mm), 'Total')
        canvas.setFont("FreeSerif", 10)
        canvas.drawString(175 * mm, A4[1] - (y * mm), '€')
        canvas.setFont("Courier", 10)
        canvas.drawString(178 * mm, A4[1] - (y * mm), "%10.2f" % (total_amount + total_vat))
        y += 5

        # Finish the page and save the PDF
        canvas.showPage()
        canvas.save()

        # Merge the letterhead paper with the data.  The letterhead handle is
        # left open deliberately: PdfFileReader reads from it lazily while the
        # merged page is written below.  open() replaces the Python-2-only
        # file() builtin.
        letterhead = PdfFileReader(open(settings.letterhead_paper_path, "rb"))
        page = letterhead.getPage(0)
        pdfInput = PdfFileReader(StringIO(pdf_buffer.getvalue()))
        page.mergePage(pdfInput.getPage(0))
        output = PdfFileWriter()
        output.addPage(page)
        pdf_buffer.close()

        filename = "%s/invoices/%s.pdf" % (STATIC_ROOT, self.full_invoice_no)
        # The with-block closes (and flushes) the output handle, which the
        # original never did.
        with open(filename, "wb") as out_stream:
            output.write(out_stream)

        return filename
예제 #34
0
class danfe(object):
    def __init__(self, sizepage=A4, list_xml=None, recibo=True,
                 orientation='portrait', logo=None):
        """Render one DANFE (the printable companion document of a Brazilian
        NF-e electronic invoice) per XML tree in *list_xml* into an in-memory
        PDF buffer.

        sizepage    -- ReportLab page size; only A4/portrait is implemented.
        list_xml    -- iterable of parsed NF-e XML root elements.
        recibo      -- when True, draw the delivery-receipt stub on page 1.
        orientation -- 'portrait' only; 'landscape' raises NameError.
        logo        -- optional issuer logo drawn in the header (see ide_emit).

        The rendered PDF is retrieved afterwards via writeto_pdf().
        """
        # All layout coordinates in this class are millimetres, measured
        # top-down from the upper-left corner; the drawing helpers convert
        # to the PDF bottom-up coordinate system.
        self.width = 210    # 21 x 29,7cm (A4)
        self.height = 297
        self.nLeft = 10     # page margins (mm)
        self.nRight = 10
        self.nTop = 7
        self.nBottom = 8
        self.nlin = self.nTop   # vertical cursor: current row in mm
        self.logo = logo
        # Freight-responsibility labels keyed by the <modFrete> tag value.
        self.oFrete = {'0': '0 - Emitente',
                       '1': '1 - Dest/Remet',
                       '2': '2 - Terceiros',
                       '9': '9 - Sem Frete'}

        self.oPDF_IO = IO()   # in-memory buffer, flushed by writeto_pdf()
        if orientation == 'landscape':
            raise NameError('Rotina não implementada')
        else:
            size = sizepage

        self.canvas = canvas.Canvas(self.oPDF_IO, pagesize=size)
        self.canvas.setTitle('DANFE')
        self.canvas.setStrokeColor(black)

        for oXML in list_xml:
            oXML_cobr = oXML.find(
                ".//{http://www.portalfiscal.inf.br/nfe}cobr")

            self.NrPages = 1
            self.Page = 1

            # Count the wrapped text lines each item description needs, to
            # paginate the products table.  With a billing (fatura) block
            # present, fewer item lines fit on the first page (30 vs 34).
            nNr_Lin_Pg_1 = 34 if oXML_cobr is None else 30
            # Paginator entry: [ first_item, last_item+1, lines_used, line_limit ]
            oPaginator = [[0, 0, 0, nNr_Lin_Pg_1]]
            el_det = oXML.findall(".//{http://www.portalfiscal.inf.br/nfe}det")
            # NOTE(review): findall() returns a list, never None, so this
            # branch always runs (which also keeps list_desc defined below).
            if el_det is not None:
                list_desc = []
                list_cod_prod = []
                nPg = 0
                for nId, item in enumerate(el_det):
                    el_prod = item.find(
                        ".//{http://www.portalfiscal.inf.br/nfe}prod")
                    infAdProd = item.find(
                        ".//{http://www.portalfiscal.inf.br/nfe}infAdProd")

                    # Wrap product description (plus optional additional
                    # info) to the 56-character description column.
                    list_ = wrap(tagtext(oNode=el_prod, cTag='xProd'), 56)
                    if infAdProd is not None:
                        list_.extend(wrap(infAdProd.text, 56))
                    list_desc.append(list_)

                    # Product code wrapped to its 14-character column.
                    list_cProd = wrap(tagtext(oNode=el_prod, cTag='cProd'), 14)
                    list_cod_prod.append(list_cProd)

                    # Number of lines needed for this item's description
                    nLin_Itens = len(list_)

                    if (oPaginator[nPg][2] + nLin_Itens) >= oPaginator[nPg][3]:
                        # Item does not fit: start a new page (77-line budget
                        # on continuation pages, which have no other blocks).
                        oPaginator.append([0, 0, 0, 77])
                        nPg += 1
                        oPaginator[nPg][0] = nId
                        oPaginator[nPg][1] = nId + 1
                        oPaginator[nPg][2] = nLin_Itens
                    else:
                        # add 1 because xrange's upper bound is exclusive
                        oPaginator[nPg][1] = nId + 1
                        oPaginator[nPg][2] += nLin_Itens

                self.NrPages = len(oPaginator)   # total page count

            if recibo:
                self.recibo_entrega(oXML=oXML)

            self.ide_emit(oXML=oXML)
            self.destinatario(oXML=oXML)

            if oXML_cobr is not None:
                self.faturas(oXML=oXML_cobr)

            self.impostos(oXML=oXML)
            self.transportes(oXML=oXML)
            self.produtos(oXML=oXML, el_det=el_det, oPaginator=oPaginator[0],
                          list_desc=list_desc, list_cod_prod=list_cod_prod)

            self.adicionais(oXML=oXML)

            # Render the remaining (overflow) pages of this invoice
            for oPag in oPaginator[1:]:
                self.newpage()
                self.ide_emit(oXML=oXML)
                self.produtos(oXML=oXML, el_det=el_det, oPaginator=oPag,
                              list_desc=list_desc, nHeight=77,
                              list_cod_prod=list_cod_prod)

            self.newpage()

        self.canvas.save()

    def ide_emit(self, oXML=None):
        """Draw the page header: DANFE title box, Code128 barcode of the
        access key, authorization protocol, and issuer identification.

        Called once per page; advances self.nlin by 48 mm.
        """
        elem_infNFe = oXML.find(
            ".//{http://www.portalfiscal.inf.br/nfe}infNFe")
        elem_protNFe = oXML.find(
            ".//{http://www.portalfiscal.inf.br/nfe}protNFe")
        elem_emit = oXML.find(".//{http://www.portalfiscal.inf.br/nfe}emit")
        elem_ide = oXML.find(".//{http://www.portalfiscal.inf.br/nfe}ide")

        # Access key = the infNFe 'Id' attribute minus its 'NFe' prefix.
        cChave = elem_infNFe.attrib.get('Id')[3:]
        barcode128 = code128.Code128(cChave, barHeight=10*mm, barWidth=0.25*mm)

        # Frame boxes and separators of the header
        self.canvas.setLineWidth(.5)
        self.rect(self.nLeft, self.nlin+1, self.nLeft+75, 32)
        self.rect(self.nLeft+115, self.nlin+1,
                  self.width-self.nLeft-self.nRight-115, 39)

        self.hline(self.nLeft+85, self.nlin+1, 125)

        self.rect(self.nLeft+116, self.nlin+15,
                  self.width-self.nLeft-self.nRight-117, 6)

        self.rect(self.nLeft, self.nlin+33,
                  self.width-self.nLeft-self.nRight, 14)
        self.hline(self.nLeft, self.nlin+40, self.width-self.nRight)
        self.vline(self.nLeft+60, self.nlin+40, 7)
        self.vline(self.nLeft+100, self.nlin+40, 7)

        # Labels
        self.canvas.setFont('NimbusSanL-Bold', 12)
        self.stringcenter(self.nLeft+98, self.nlin+5, 'DANFE')
        # tpNF: 0 = incoming, 1 = outgoing (printed inside the small box)
        self.stringcenter(self.nLeft+109, self.nlin+19.5,
                          tagtext(oNode=elem_ide, cTag='tpNF'))
        self.canvas.setFont('NimbusSanL-Bold', 8)
        # Invoice number zero-padded to 11 digits, dot-grouped (pt-BR style)
        cNF = tagtext(oNode=elem_ide, cTag='nNF')
        cNF = '{0:011,}'.format(int(cNF)).replace(",", ".")
        self.stringcenter(self.nLeft+100, self.nlin+25, "Nº %s" % (cNF))

        self.stringcenter(self.nLeft+100, self.nlin+29, u"SÉRIE %s" % (
            tagtext(oNode=elem_ide, cTag='serie')))
        cPag = "Página %s de %s" % (str(self.Page), str(self.NrPages))
        self.stringcenter(self.nLeft+100, self.nlin+32, cPag)
        self.canvas.setFont('NimbusSanL-Regu', 6)
        self.string(self.nLeft+86, self.nlin+8, 'Documento Auxiliar da')
        self.string(self.nLeft+86, self.nlin+10.5, 'Nota Fiscal Eletrônica')
        self.string(self.nLeft+86, self.nlin+16, '0 - Entrada')
        self.string(self.nLeft+86, self.nlin+19, '1 - Saída')
        self.rect(self.nLeft+105, self.nlin+15, 8, 6)

        self.stringcenter(
            self.nLeft+152, self.nlin+25,
            'Consulta de autenticidade no portal nacional da NF-e')
        self.stringcenter(
            self.nLeft+152, self.nlin+28,
            'www.nfe.fazenda.gov.br/portal ou no site da SEFAZ Autorizadora')
        self.canvas.setFont('NimbusSanL-Regu', 5)
        self.string(self.nLeft+117, self.nlin+16.7, 'CHAVE DE ACESSO')
        self.string(self.nLeft+116, self.nlin+2.7, 'CONTROLE DO FISCO')

        self.string(self.nLeft+1, self.nlin+34.7, 'NATUREZA DA OPERAÇÃO')
        self.string(self.nLeft+116, self.nlin+34.7,
                    'PROTOCOLO DE AUTORIZAÇÃO DE USO')
        self.string(self.nLeft+1, self.nlin+41.7, 'INSCRIÇÃO ESTADUAL')
        self.string(self.nLeft+61, self.nlin+41.7,
                    'INSCRIÇÃO ESTADUAL DO SUBST. TRIB.')
        self.string(self.nLeft+101, self.nlin+41.7, 'CNPJ')

        # Field contents
        barcode128.drawOn(self.canvas, (self.nLeft+111.5)*mm,
                          (self.height-self.nlin-14)*mm)
        self.canvas.setFont('NimbusSanL-Bold', 6)
        nW_Rect = (self.width-self.nLeft-self.nRight-117) / 2
        self.stringcenter(self.nLeft+116.5+nW_Rect, self.nlin+19.5,
                          ' '.join(chunks(cChave, 4)))  # access key, 4-digit groups
        self.canvas.setFont('NimbusSanL-Regu', 8)
        # Authorization protocol number plus its receipt timestamp
        cDt, cHr = getdateUTC(tagtext(oNode=elem_protNFe, cTag='dhRecbto'))
        cProtocolo = tagtext(oNode=elem_protNFe, cTag='nProt')
        cDt = cProtocolo + ' - ' + cDt + ' ' + cHr
        nW_Rect = (self.width-self.nLeft-self.nRight-110) / 2
        self.stringcenter(self.nLeft+115+nW_Rect, self.nlin+38.7, cDt)
        self.canvas.setFont('NimbusSanL-Regu', 8)
        self.string(self.nLeft+1, self.nlin+38.7,
                    tagtext(oNode=elem_ide, cTag='natOp'))
        self.string(self.nLeft+1, self.nlin+46,
                    tagtext(oNode=elem_emit, cTag='IE'))
        self.string(self.nLeft+101, self.nlin+46,
                    format_cnpj_cpf(tagtext(oNode=elem_emit, cTag='CNPJ')))

        styles = getSampleStyleSheet()
        styleN = styles['Normal']
        styleN.fontSize = 10
        styleN.fontName = 'NimbusSanL-Bold'
        styleN.alignment = TA_CENTER

        # Issuer corporate name (wrapped paragraph)
        P = Paragraph(tagtext(oNode=elem_emit, cTag='xNome'), styleN)
        w, h = P.wrap(55*mm, 50*mm)
        P.drawOn(self.canvas, (self.nLeft+30)*mm,
                 (self.height-self.nlin-12)*mm)

        if self.logo:
            img = get_image(self.logo, width=2*cm)
            img.drawOn(self.canvas, (self.nLeft+5)*mm,
                       (self.height-self.nlin-22)*mm)

        # Issuer address block, assembled as Platypus mini-markup
        cEnd = tagtext(oNode=elem_emit, cTag='xLgr') + ', ' + tagtext(
            oNode=elem_emit, cTag='nro') + ' - '
        cEnd += tagtext(oNode=elem_emit, cTag='xBairro') + '<br />' + tagtext(
            oNode=elem_emit, cTag='xMun') + ' - '
        cEnd += 'Fone: ' + tagtext(oNode=elem_emit, cTag='fone') + '<br />'
        cEnd += tagtext(oNode=elem_emit, cTag='UF') + ' - ' + tagtext(
            oNode=elem_emit, cTag='CEP')

        regime = tagtext(oNode=elem_emit, cTag='CRT')
        cEnd += u'<br />Regime Tributário: %s' % (REGIME_TRIBUTACAO[regime])

        # NOTE: styleN mutations below also affect styles['Normal'] itself
        # (same shared object from getSampleStyleSheet()).
        styleN.fontName = 'NimbusSanL-Regu'
        styleN.fontSize = 7
        styleN.leading = 10
        P = Paragraph(cEnd, styleN)
        w, h = P.wrap(55*mm, 30*mm)
        P.drawOn(self.canvas, (self.nLeft+30)*mm,
                 (self.height-self.nlin-31)*mm)

        # Test-environment watermark (tpAmb == '2' means homologation)
        if tagtext(oNode=elem_ide, cTag='tpAmb') == '2':
            self.canvas.saveState()
            self.canvas.rotate(90)
            self.canvas.setFont('Times-Bold', 40)
            self.canvas.setFillColorRGB(0.57, 0.57, 0.57)
            self.string(self.nLeft+65, 449, 'SEM VALOR FISCAL')
            self.canvas.restoreState()

        self.nlin += 48

    def destinatario(self, oXML=None):
        """Draw the recipient (destinatário/remetente) block: name, CNPJ/CPF,
        address, and emission / entry-exit dates.

        Advances self.nlin by 25 mm (1 + 24).
        """
        elem_ide = oXML.find(".//{http://www.portalfiscal.inf.br/nfe}ide")
        elem_dest = oXML.find(".//{http://www.portalfiscal.inf.br/nfe}dest")
        nMr = self.width-self.nRight   # right margin x coordinate

        self.nlin += 1

        # Block frame: a 20 mm box split into three 6.66 mm rows
        self.canvas.setFont('NimbusSanL-Bold', 7)
        self.string(self.nLeft+1, self.nlin+1, 'DESTINATÁRIO/REMETENTE')
        self.rect(self.nLeft, self.nlin+2,
                  self.width-self.nLeft-self.nRight, 20)
        self.vline(nMr-25, self.nlin+2, 20)
        self.hline(self.nLeft, self.nlin+8.66, self.width-self.nLeft)
        self.hline(self.nLeft, self.nlin+15.32, self.width-self.nLeft)
        self.vline(nMr-70, self.nlin+2, 6.66)
        self.vline(nMr-53, self.nlin+8.66, 6.66)
        self.vline(nMr-99, self.nlin+8.66, 6.66)
        self.vline(nMr-90, self.nlin+15.32, 6.66)
        self.vline(nMr-102, self.nlin+15.32, 6.66)
        self.vline(nMr-136, self.nlin+15.32, 6.66)
        # Labels/Fields
        self.canvas.setFont('NimbusSanL-Bold', 5)
        self.string(self.nLeft+1, self.nlin+3.7, 'NOME/RAZÃO SOCIAL')
        self.string(nMr-69, self.nlin+3.7, 'CNPJ/CPF')
        self.string(nMr-24, self.nlin+3.7, 'DATA DA EMISSÃO')
        self.string(self.nLeft+1, self.nlin+10.3, 'ENDEREÇO')
        self.string(nMr-98, self.nlin+10.3, 'BAIRRO/DISTRITO')
        self.string(nMr-52, self.nlin+10.3, 'CEP')
        self.string(nMr-24, self.nlin+10.3, 'DATA DE ENTRADA/SAÍDA')
        self.string(self.nLeft+1, self.nlin+17.1, 'MUNICÍPIO')
        self.string(nMr-135, self.nlin+17.1, 'FONE/FAX')
        self.string(nMr-101, self.nlin+17.1, 'UF')
        self.string(nMr-89, self.nlin+17.1, 'INSCRIÇÃO ESTADUAL')
        self.string(nMr-24, self.nlin+17.1, 'HORA DE ENTRADA/SAÍDA')
        # Field contents
        self.canvas.setFont('NimbusSanL-Regu', 8)
        self.string(self.nLeft+1, self.nlin+7.5,
                    tagtext(oNode=elem_dest, cTag='xNome'))
        self.string(nMr-69, self.nlin+7.5,
                    format_cnpj_cpf(tagtext(oNode=elem_dest, cTag='CNPJ')))
        cDt, cHr = getdateUTC(tagtext(oNode=elem_ide, cTag='dhEmi'))
        self.string(nMr-24, self.nlin+7.7, cDt + ' ' + cHr)
        cDt, cHr = getdateUTC(tagtext(oNode=elem_ide, cTag='dhSaiEnt'))
        self.string(nMr-24, self.nlin+14.3, cDt + ' ' + cHr)  # exit date
        cEnd = tagtext(oNode=elem_dest, cTag='xLgr') + ', ' + tagtext(
            oNode=elem_dest, cTag='nro')
        self.string(self.nLeft+1, self.nlin+14.3, cEnd)
        self.string(nMr-98, self.nlin+14.3,
                    tagtext(oNode=elem_dest, cTag='xBairro'))
        self.string(nMr-52, self.nlin+14.3,
                    tagtext(oNode=elem_dest, cTag='CEP'))
        self.string(self.nLeft+1, self.nlin+21.1,
                    tagtext(oNode=elem_dest, cTag='xMun'))
        self.string(nMr-135, self.nlin+21.1,
                    tagtext(oNode=elem_dest, cTag='fone'))
        self.string(nMr-101, self.nlin+21.1,
                    tagtext(oNode=elem_dest, cTag='UF'))
        self.string(nMr-89, self.nlin+21.1,
                    tagtext(oNode=elem_dest, cTag='IE'))

        self.nlin += 24  # rows consumed by this block

    def faturas(self, oXML=None):
        """Draw the billing (FATURA) block: up to 9 installment entries laid
        out in three columns, plus optional seller code/name taken from
        <obsCont> entries in <infAdic>.

        oXML is the <cobr> element (not the document root).  Advances
        self.nlin by 16 mm.
        """
        nMr = self.width-self.nRight

        self.canvas.setFont('NimbusSanL-Bold', 7)
        self.string(self.nLeft+1, self.nlin+1, 'FATURA')
        self.rect(self.nLeft, self.nlin+2,
                  self.width-self.nLeft-self.nRight, 13)
        self.vline(nMr-47.5, self.nlin+2, 13)
        self.vline(nMr-95, self.nlin+2, 13)
        self.vline(nMr-142.5, self.nlin+2, 13)
        self.hline(nMr-47.5, self.nlin+8.5, self.width-self.nLeft)
        # Labels
        self.canvas.setFont('NimbusSanL-Regu', 5)
        self.string(nMr-46.5, self.nlin+3.8, 'CÓDIGO VENDEDOR')
        self.string(nMr-46.5, self.nlin+10.2, 'NOME VENDEDOR')
        self.string(nMr-93.5, self.nlin+3.8,
                    'FATURA          VENCIMENTO           VALOR')
        self.string(nMr-140.5, self.nlin+3.8,
                    'FATURA          VENCIMENTO           VALOR')
        self.string(self.nLeft+2, self.nlin+3.8,
                    'FATURA         VENCIMENTO            VALOR')

        # Field contents
        self.canvas.setFont('NimbusSanL-Bold', 6)
        nLin = 7    # current row offset inside the block
        nPar = 1    # installment index within the current column (1..3)
        nCol = 0    # current column x offset
        nAju = 0    # small cumulative adjustment between columns

        # Skip the first child (the <fat> element) and render at most the
        # next 9 <dup> installments.
        line_iter = iter(oXML[1:10])  # Skips elem 0, takes the next 9
        for oXML_dup in line_iter:

            cDt, cHr = getdateUTC(tagtext(oNode=oXML_dup, cTag='dVenc'))
            self.string(self.nLeft+nCol+1, self.nlin+nLin,
                        tagtext(oNode=oXML_dup, cTag='nDup'))
            self.string(self.nLeft+nCol+17, self.nlin+nLin, cDt)
            self.stringRight(
                self.nLeft+nCol+47, self.nlin+nLin,
                format_number(tagtext(oNode=oXML_dup, cTag='vDup'),
                              precision=2))

            if nPar == 3:
                # Column full: move to the next column, reset the row
                nLin = 7
                nPar = 1
                nCol += 47
                nAju += 1
                nCol += nAju * (0.3)
            else:
                nLin += 3.3
                nPar += 1

        # Extra XML fields - present only when there is billing information.
        # getparent() is lxml-specific: climbs from <cobr> to the document.
        elem_infAdic = oXML.getparent().find(
            ".//{http://www.portalfiscal.inf.br/nfe}infAdic")
        if elem_infAdic is not None:
            codvend = elem_infAdic.find(
                ".//{http://www.portalfiscal.inf.br/nfe}obsCont\
[@xCampo='CodVendedor']")
            self.string(nMr-46.5, self.nlin+7.7,
                        tagtext(oNode=codvend, cTag='xTexto'))
            vend = elem_infAdic.find(".//{http://www.portalfiscal.inf.br/nfe}\
obsCont[@xCampo='NomeVendedor']")
            self.string(nMr-46.5, self.nlin+14.3,
                        tagtext(oNode=vend, cTag='xTexto')[:36])

        self.nlin += 16  # rows consumed by this block

    def impostos(self, oXML=None):
        """Draw the tax-calculation block (CÁLCULO DO IMPOSTO): twelve
        right-aligned totals taken from the <total> element.

        Advances self.nlin by 18 mm (1 + 17).
        """
        el_total = oXML.find(".//{http://www.portalfiscal.inf.br/nfe}total")
        nMr = self.width-self.nRight
        self.nlin += 1
        self.canvas.setFont('NimbusSanL-Bold', 7)
        self.string(self.nLeft+1, self.nlin+1, 'CÁLCULO DO IMPOSTO')
        # 13 mm frame split into two rows of six cells each
        self.rect(self.nLeft, self.nlin+2,
                  self.width-self.nLeft-self.nRight, 13)
        self.hline(self.nLeft, self.nlin+8.5, self.width-self.nLeft)
        self.vline(nMr-35, self.nlin+2, 6.5)
        self.vline(nMr-65, self.nlin+2, 6.5)
        self.vline(nMr-95, self.nlin+2, 6.5)
        self.vline(nMr-125, self.nlin+2, 6.5)
        self.vline(nMr-155, self.nlin+2, 6.5)
        self.vline(nMr-35, self.nlin+8.5, 6.5)
        self.vline(nMr-65, self.nlin+8.5, 6.5)
        self.vline(nMr-95, self.nlin+8.5, 6.5)
        self.vline(nMr-125, self.nlin+8.5, 6.5)
        self.vline(nMr-155, self.nlin+8.5, 6.5)
        # Labels
        self.canvas.setFont('NimbusSanL-Regu', 5)
        self.string(self.nLeft+1, self.nlin+3.8, 'BASE DE CÁLCULO DO ICMS')
        self.string(nMr-154, self.nlin+3.8, 'VALOR DO ICMS')
        self.string(nMr-124, self.nlin+3.8, 'BASE DE CÁLCULO DO ICMS ST')
        self.string(nMr-94, self.nlin+3.8, 'VALOR DO ICMS ST')
        self.string(nMr-64, self.nlin+3.8, 'VALOR APROX TRIBUTOS')
        self.string(nMr-34, self.nlin+3.8, 'VALOR TOTAL DOS PRODUTOS')

        self.string(self.nLeft+1, self.nlin+10.2, 'VALOR DO FRETE')
        self.string(nMr-154, self.nlin+10.2, 'VALOR DO SEGURO')
        self.string(nMr-124, self.nlin+10.2, 'DESCONTO')
        self.string(nMr-94, self.nlin+10.2, 'OUTRAS DESP. ACESSÓRIAS')
        self.string(nMr-64, self.nlin+10.2, 'VALOR DO IPI')
        self.string(nMr-34, self.nlin+10.2, 'VALOR TOTAL DA NOTA')

        # Field contents (right-aligned numeric values)
        self.canvas.setFont('NimbusSanL-Regu', 8)
        self.stringRight(
            self.nLeft+34, self.nlin+7.7,
            format_number(tagtext(oNode=el_total, cTag='vBC'), precision=2))
        self.stringRight(
            self.nLeft+64, self.nlin+7.7,
            format_number(tagtext(oNode=el_total, cTag='vICMS'), precision=2))
        self.stringRight(
            self.nLeft+94, self.nlin+7.7,
            format_number(tagtext(oNode=el_total, cTag='vBCST'), precision=2))
        self.stringRight(
            nMr-66, self.nlin+7.7,
            format_number(tagtext(oNode=el_total, cTag='vST'), precision=2))
        self.stringRight(
            nMr-36, self.nlin+7.7,
            format_number(tagtext(oNode=el_total, cTag='vTotTrib'),
                          precision=2))
        self.stringRight(
            nMr-1, self.nlin+7.7,
            format_number(tagtext(oNode=el_total, cTag='vProd'), precision=2))
        self.stringRight(
            self.nLeft+34, self.nlin+14.1,
            format_number(tagtext(oNode=el_total, cTag='vFrete'), precision=2))
        self.stringRight(
            self.nLeft+64, self.nlin+14.1,
            format_number(tagtext(oNode=el_total, cTag='vSeg'), precision=2))
        self.stringRight(
            self.nLeft+94, self.nlin+14.1,
            format_number(tagtext(oNode=el_total, cTag='vDesc'), precision=2))
        self.stringRight(
            self.nLeft+124, self.nlin+14.1,
            format_number(tagtext(oNode=el_total, cTag='vOutro'), precision=2))
        self.stringRight(
            self.nLeft+154, self.nlin+14.1,
            format_number(tagtext(oNode=el_total, cTag='vIPI'), precision=2))
        self.stringRight(
            nMr-1, self.nlin+14.1,
            format_number(tagtext(oNode=el_total, cTag='vNF'), precision=2))

        self.nlin += 17   # rows consumed by this block

    def transportes(self, oXML=None):
        """Draw the carrier/volumes block (TRANSPORTADOR/VOLUMES) from the
        <transp> element: carrier identification, address, and volume data.

        Advances self.nlin by 23 mm.
        """
        el_transp = oXML.find(".//{http://www.portalfiscal.inf.br/nfe}transp")
        nMr = self.width-self.nRight

        self.canvas.setFont('NimbusSanL-Bold', 7)
        self.string(self.nLeft+1, self.nlin+1,
                    'TRANSPORTADOR/VOLUMES TRANSPORTADOS')
        self.canvas.setFont('NimbusSanL-Regu', 5)
        # 20 mm frame split into three rows with per-row column dividers
        self.rect(self.nLeft, self.nlin+2,
                  self.width-self.nLeft-self.nRight, 20)
        self.hline(self.nLeft, self.nlin+8.6, self.width-self.nLeft)
        self.hline(self.nLeft, self.nlin+15.2, self.width-self.nLeft)
        self.vline(nMr-40, self.nlin+2, 13.2)
        self.vline(nMr-49, self.nlin+2, 20)
        self.vline(nMr-92, self.nlin+2, 6.6)
        self.vline(nMr-120, self.nlin+2, 6.6)
        self.vline(nMr-75, self.nlin+2, 6.6)
        self.vline(nMr-26, self.nlin+15.2, 6.6)
        self.vline(nMr-102, self.nlin+8.6, 6.6)
        self.vline(nMr-85, self.nlin+15.2, 6.6)
        self.vline(nMr-121, self.nlin+15.2, 6.6)
        self.vline(nMr-160, self.nlin+15.2, 6.6)
        # Labels/Fields
        self.string(nMr-39, self.nlin+3.8, 'CNPJ/CPF')
        self.string(nMr-74, self.nlin+3.8, 'PLACA DO VEÍCULO')
        self.string(nMr-91, self.nlin+3.8, 'CÓDIGO ANTT')
        self.string(nMr-119, self.nlin+3.8, 'FRETE POR CONTA')
        self.string(self.nLeft+1, self.nlin+3.8, 'RAZÃO SOCIAL')
        self.string(nMr-48, self.nlin+3.8, 'UF')
        self.string(nMr-39, self.nlin+10.3, 'INSCRIÇÃO ESTADUAL')
        self.string(nMr-48, self.nlin+10.3, 'UF')
        self.string(nMr-101, self.nlin+10.3, 'MUNICÍPIO')
        self.string(self.nLeft+1, self.nlin+10.3, 'ENDEREÇO')
        self.string(nMr-48, self.nlin+17, 'PESO BRUTO')
        self.string(nMr-25, self.nlin+17, 'PESO LÍQUIDO')
        self.string(nMr-84, self.nlin+17, 'NUMERAÇÃO')
        self.string(nMr-120, self.nlin+17, 'MARCA')
        self.string(nMr-159, self.nlin+17, 'ESPÉCIE')
        self.string(self.nLeft+1, self.nlin+17, 'QUANTIDADE')
        # Field contents
        self.canvas.setFont('NimbusSanL-Regu', 8)
        self.string(self.nLeft+1, self.nlin+7.7,
                    tagtext(oNode=el_transp, cTag='xNome')[:40])
        # Freight responsibility label resolved via the oFrete lookup table
        self.string(self.nLeft+71, self.nlin+7.7,
                    self.oFrete[tagtext(oNode=el_transp, cTag='modFrete')])
        self.string(nMr-39, self.nlin+7.7,
                    format_cnpj_cpf(tagtext(oNode=el_transp, cTag='CNPJ')))
        self.string(self.nLeft+1, self.nlin+14.2,
                    tagtext(oNode=el_transp, cTag='xEnder')[:45])
        self.string(self.nLeft+89, self.nlin+14.2,
                    tagtext(oNode=el_transp, cTag='xMun'))
        self.string(nMr-48, self.nlin+14.2,
                    tagtext(oNode=el_transp, cTag='UF'))
        self.string(nMr-39, self.nlin+14.2,
                    tagtext(oNode=el_transp, cTag='IE'))
        self.string(self.nLeft+1, self.nlin+21.2,
                    tagtext(oNode=el_transp, cTag='qVol'))
        self.string(self.nLeft+31, self.nlin+21.2,
                    tagtext(oNode=el_transp, cTag='esp'))
        self.string(self.nLeft+70, self.nlin+21.2,
                    tagtext(oNode=el_transp, cTag='marca'))
        self.string(self.nLeft+106, self.nlin+21.2,
                    tagtext(oNode=el_transp, cTag='nVol'))
        self.stringRight(
            nMr-27, self.nlin+21.2,
            format_number(tagtext(oNode=el_transp, cTag='pesoB'), precision=3))
        self.stringRight(
            nMr-1, self.nlin+21.2,
            format_number(tagtext(oNode=el_transp, cTag='pesoL'), precision=3))

        self.nlin += 23

    def produtos(self, oXML=None, el_det=None, oPaginator=None,
                 list_desc=None, list_cod_prod=None, nHeight=29):
        """Draw the products table for one page.

        el_det        -- list of <det> elements (all items of the invoice).
        oPaginator    -- [first_item, last_item+1, lines_used, line_limit]
                         slice descriptor built in __init__ for this page.
        list_desc     -- pre-wrapped description lines, indexed by item.
        list_cod_prod -- pre-wrapped product-code lines, indexed by item.
        nHeight       -- table body height in text rows (29 on page 1,
                         77 on continuation pages).
        """
        nMr = self.width-self.nRight
        nStep = 2.5  # vertical pitch between text lines (mm)
        nH = 7.5 + (nHeight * nStep)  # table height; header band is 7.5 mm
        self.nlin += 1

        self.canvas.setFont('NimbusSanL-Bold', 7)
        self.string(self.nLeft+1, self.nlin+1, 'DADOS DO PRODUTO/SERVIÇO')
        self.rect(self.nLeft, self.nlin+2,
                  self.width-self.nLeft-self.nRight, nH)
        self.hline(self.nLeft, self.nlin+8, self.width-self.nLeft)

        self.canvas.setFont('NimbusSanL-Regu', 5.5)
        # Column dividers and headings (right-to-left from the margin)
        self.vline(self.nLeft+15, self.nlin+2, nH)
        self.stringcenter(self.nLeft+7.5, self.nlin+5.5, 'CÓDIGO')
        self.vline(nMr-7, self.nlin+2, nH)
        self.stringcenter(nMr-3.5, self.nlin+4.5, 'ALÍQ')
        self.stringcenter(nMr-3.5, self.nlin+6.5, 'IPI')
        self.vline(nMr-14, self.nlin+2, nH)
        self.stringcenter(nMr-10.5, self.nlin+4.5, 'ALÍQ')
        self.stringcenter(nMr-10.5, self.nlin+6.5, 'ICMS')
        self.vline(nMr-26, self.nlin+2, nH)
        self.stringcenter(nMr-20, self.nlin+5.5, 'VLR. IPI')
        self.vline(nMr-38, self.nlin+2, nH)
        self.stringcenter(nMr-32, self.nlin+5.5, 'VLR. ICMS')
        self.vline(nMr-50, self.nlin+2, nH)
        self.stringcenter(nMr-44, self.nlin+5.5, 'BC ICMS')
        self.vline(nMr-64, self.nlin+2, nH)
        self.stringcenter(nMr-57, self.nlin+5.5, 'VLR TOTAL')
        self.vline(nMr-77, self.nlin+2, nH)
        self.stringcenter(nMr-70.5, self.nlin+5.5, 'VLR UNIT')
        self.vline(nMr-90, self.nlin+2, nH)
        self.stringcenter(nMr-83.5, self.nlin+5.5, 'QTD')
        self.vline(nMr-96, self.nlin+2, nH)
        self.stringcenter(nMr-93, self.nlin+5.5, 'UNID')
        self.vline(nMr-102, self.nlin+2, nH)
        self.stringcenter(nMr-99, self.nlin+5.5, 'CFOP')
        self.vline(nMr-108, self.nlin+2, nH)
        self.stringcenter(nMr-105, self.nlin+5.5, 'CST')
        self.vline(nMr-117, self.nlin+2, nH)
        self.stringcenter(nMr-112.5, self.nlin+5.5, 'NCM/SH')

        # Description column heading, centered in the remaining width
        nWidth_Prod = nMr-135-self.nLeft-11
        nCol_ = self.nLeft+20 + (nWidth_Prod / 2)
        self.stringcenter(nCol_, self.nlin+5.5, 'DESCRIÇÃO DO PRODUTO/SERVIÇO')

        # Field contents: one row group per item on this page
        self.canvas.setFont('NimbusSanL-Regu', 5)
        nLin = self.nlin+10.5

        # xrange ==> this module targets Python 2; 'id' shadows the builtin
        for id in xrange(oPaginator[0], oPaginator[1]):
            item = el_det[id]
            el_prod = item.find(".//{http://www.portalfiscal.inf.br/nfe}prod")
            el_imp = item.find(
                ".//{http://www.portalfiscal.inf.br/nfe}imposto")

            el_imp_ICMS = el_imp.find(
                ".//{http://www.portalfiscal.inf.br/nfe}ICMS")
            el_imp_IPI = el_imp.find(
                ".//{http://www.portalfiscal.inf.br/nfe}IPI")

            # CST column is origin digit + CST code concatenated
            cCST = tagtext(oNode=el_imp_ICMS, cTag='orig') + \
                tagtext(oNode=el_imp_ICMS, cTag='CST')
            vBC = tagtext(oNode=el_imp_ICMS, cTag='vBC')
            vICMS = tagtext(oNode=el_imp_ICMS, cTag='vICMS')
            pICMS = tagtext(oNode=el_imp_ICMS, cTag='pICMS')

            vIPI = tagtext(oNode=el_imp_IPI, cTag='vIPI')
            pIPI = tagtext(oNode=el_imp_IPI, cTag='pIPI')

            self.stringcenter(nMr-112.5, nLin,
                              tagtext(oNode=el_prod, cTag='NCM'))
            self.stringcenter(nMr-105, nLin, cCST)
            self.stringcenter(nMr-99, nLin,
                              tagtext(oNode=el_prod, cTag='CFOP'))
            self.stringcenter(nMr-93, nLin,
                              tagtext(oNode=el_prod, cTag='uCom'))
            self.stringRight(nMr-77.5, nLin, format_number(
                tagtext(oNode=el_prod, cTag='qCom'), precision=4))
            self.stringRight(nMr-64.5, nLin, format_number(
                tagtext(oNode=el_prod, cTag='vUnCom'), precision=2))
            self.stringRight(nMr-50.5, nLin, format_number(
                tagtext(oNode=el_prod, cTag='vProd'), precision=2))
            self.stringRight(nMr-38.5, nLin, format_number(vBC, precision=2))
            self.stringRight(nMr-26.5, nLin, format_number(vICMS, precision=2))
            self.stringRight(nMr-7.5, nLin, format_number(pICMS, precision=2))

            # IPI columns only when present in the XML
            if vIPI:
                self.stringRight(nMr-14.5, nLin,
                                 format_number(vIPI, precision=2))
            if pIPI:
                self.stringRight(nMr-0.5, nLin,
                                 format_number(pIPI, precision=2))

            # Product code (wrapped lines)
            line_cod = nLin
            for des in list_cod_prod[id]:
                self.string(self.nLeft+0.2, line_cod, des)
                line_cod += nStep

            # Item description (wrapped lines)
            line_desc = nLin
            for des in list_desc[id]:
                self.string(self.nLeft+15.5, line_desc, des)
                line_desc += nStep

            # Next row starts below the taller of the two wrapped columns;
            # a gray rule separates consecutive items.
            nLin = max(line_cod, line_desc)
            self.canvas.setStrokeColor(gray)
            self.hline(self.nLeft, nLin-2, self.width-self.nLeft)
            self.canvas.setStrokeColor(black)

        self.nlin += nH + 3

    def adicionais(self, oXML=None):
        """Draw the additional-data block (DADOS ADICIONAIS): free-form
        complementary info plus fisco notes, wrapped as a paragraph.

        Advances self.nlin by 38 mm (2 + 36).
        """
        el_infAdic = oXML.find(
            ".//{http://www.portalfiscal.inf.br/nfe}infAdic")

        self.nlin += 2
        self.canvas.setFont('NimbusSanL-Bold', 6)
        self.string(self.nLeft+1, self.nlin+1, 'DADOS ADICIONAIS')
        self.canvas.setFont('NimbusSanL-Regu', 5)
        self.string(self.nLeft+1, self.nlin+4, 'INFORMAÇÕES COMPLEMENTARES')
        self.string((self.width/2)+1, self.nlin+4, 'RESERVADO AO FISCO')
        self.rect(self.nLeft, self.nlin+2,
                  self.width-self.nLeft-self.nRight, 42)
        self.vline(self.width/2, self.nlin+2, 42)
        # Field contents
        styles = getSampleStyleSheet()
        styleN = styles['Normal']
        styleN.fontSize = 6
        styleN.fontName = 'NimbusSanL-Regu'
        styleN.leading = 7

        # Fisco notes are prepended to the complementary info text
        fisco = tagtext(oNode=el_infAdic, cTag='infAdFisco')
        observacoes = tagtext(oNode=el_infAdic, cTag='infCpl')
        if fisco:
            observacoes = fisco + ' ' + observacoes
        # styles['Normal'] is the same object as styleN, so the paragraph
        # picks up the font settings applied above.
        P = Paragraph(observacoes, styles['Normal'])
        w, h = P.wrap(92*mm, 32*mm)
        altura = (self.height-self.nlin-5)*mm
        P.drawOn(self.canvas, (self.nLeft+1)*mm, altura - h)
        self.nlin += 36

    def recibo_entrega(self, oXML=None):
        """Draw the detachable delivery-receipt stub at the top of page 1:
        summary sentence, date/signature fields, and NF-e number/series.

        Advances self.nlin by 22 mm (20 + 2) past the cut line.
        """
        el_ide = oXML.find(".//{http://www.portalfiscal.inf.br/nfe}ide")
        el_dest = oXML.find(".//{http://www.portalfiscal.inf.br/nfe}dest")
        el_total = oXML.find(".//{http://www.portalfiscal.inf.br/nfe}total")
        el_emit = oXML.find(".//{http://www.portalfiscal.inf.br/nfe}emit")

        # self.nlin = self.height-self.nBottom-18  # 17 = receipt height
        nW = 40   # width of the NF-e number side box (mm)
        nH = 17   # receipt height (mm)
        self.canvas.setLineWidth(.5)
        self.rect(self.nLeft, self.nlin,
                  self.width-(self.nLeft+self.nRight), nH)
        self.hline(self.nLeft, self.nlin+8.5, self.width-self.nRight-nW)
        self.vline(self.width-self.nRight-nW, self.nlin, nH)
        self.vline(self.nLeft+nW, self.nlin+8.5, 8.5)

        # Labels
        self.canvas.setFont('NimbusSanL-Regu', 5)
        self.string(self.nLeft+1, self.nlin+10.2, 'DATA DE RECEBIMENTO')
        self.string(self.nLeft+41, self.nlin+10.2,
                    'IDENTIFICAÇÃO E ASSINATURA DO RECEBEDOR')
        self.stringcenter(self.width-self.nRight-(nW/2), self.nlin+2, 'NF-e')
        # Field contents
        self.canvas.setFont('NimbusSanL-Bold', 8)
        # Invoice number zero-padded to 11 digits, dot-grouped (pt-BR style)
        cNF = tagtext(oNode=el_ide, cTag='nNF')
        cNF = '{0:011,}'.format(int(cNF)).replace(",", ".")
        self.string(self.width-self.nRight-nW+2, self.nlin+8, "Nº %s" % (cNF))
        self.string(self.width-self.nRight-nW+2, self.nlin+14,
                    u"SÉRIE %s" % (tagtext(oNode=el_ide, cTag='serie')))

        cDt, cHr = getdateUTC(tagtext(oNode=el_ide, cTag='dhEmi'))
        cTotal = format_number(tagtext(oNode=el_total, cTag='vNF'),
                               precision=2)

        # Recipient one-line address used inside the summary sentence
        cEnd = tagtext(oNode=el_dest, cTag='xNome') + ' - '
        cEnd += tagtext(oNode=el_dest, cTag='xLgr') + ', ' + tagtext(
            oNode=el_dest, cTag='nro') + ', '
        cEnd += tagtext(oNode=el_dest, cTag='xBairro') + ', ' + tagtext(
            oNode=el_dest, cTag='xMun') + ' - '
        cEnd += tagtext(oNode=el_dest, cTag='UF')

        cString = u"""
        RECEBEMOS DE %s OS PRODUTOS/SERVIÇOS CONSTANTES DA NOTA FISCAL INDICADA
        ABAIXO. EMISSÃO: %s VALOR TOTAL: %s
        DESTINATARIO: %s""" % (tagtext(oNode=el_emit, cTag='xNome'),
                               cDt, cTotal, cEnd)

        styles = getSampleStyleSheet()
        styleN = styles['Normal']
        styleN.fontName = 'NimbusSanL-Regu'
        styleN.fontSize = 6
        styleN.leading = 7

        P = Paragraph(cString, styleN)
        w, h = P.wrap(149*mm, 7*mm)
        P.drawOn(self.canvas, (self.nLeft+1)*mm,
                 ((self.height-self.nlin)*mm) - h)

        # Dashed-equivalent cut line separating the stub from the DANFE body
        self.nlin += 20
        self.hline(self.nLeft, self.nlin, self.width-self.nRight)
        self.nlin += 2

    def newpage(self):
        """Emit the current page and reset state for the next one.

        Flushes the canvas page, bumps the page counter, and moves the
        vertical cursor back to the top margin.
        """
        self.canvas.showPage()
        self.Page += 1
        self.nlin = self.nTop

    def hline(self, x, y, width):
        """Draw a horizontal line at top-down row *y* (mm coordinates).

        Note: despite its name, *width* is the absolute x coordinate of the
        right endpoint, not a length.
        """
        y_pdf = (self.height - y) * mm   # flip to PDF bottom-up coordinates
        self.canvas.line(x * mm, y_pdf, width * mm, y_pdf)

    def vline(self, x, y, width):
        """Draw a vertical line of length *width* starting at top-down row *y*.

        Coordinates are millimetres; the PDF y axis is flipped internally.
        """
        top = self.height - y
        bottom = top - width
        self.canvas.line(x * mm, top * mm, x * mm, bottom * mm)

    def rect(self, col, lin, nWidth, nHeight, fill=False):
        """Draw a rectangle whose top-left corner is (col, lin) in top-down mm."""
        # ReportLab expects the bottom-left corner (PDF origin is bottom-left)
        base = self.height - lin - nHeight
        self.canvas.rect(col * mm, base * mm, nWidth * mm, nHeight * mm,
                         stroke=True, fill=fill)

    def string(self, x, y, value):
        """Draw left-aligned text at (x, y) in top-down mm coordinates."""
        y_pdf = (self.height - y) * mm
        self.canvas.drawString(x * mm, y_pdf, value)

    def stringRight(self, x, y, value):
        """Draw right-aligned text ending at (x, y) in top-down mm coordinates."""
        y_pdf = (self.height - y) * mm
        self.canvas.drawRightString(x * mm, y_pdf, value)

    def stringcenter(self, x, y, value):
        """Draw text centered on (x, y) in top-down mm coordinates."""
        y_pdf = (self.height - y) * mm
        self.canvas.drawCentredString(x * mm, y_pdf, value)

    def writeto_pdf(self, fileObj):
        """Write the rendered PDF bytes into *fileObj* and release the buffer.

        May only be called once: the internal buffer is closed afterwards.
        """
        rendered = self.oPDF_IO.getvalue()
        fileObj.write(rendered)
        self.oPDF_IO.close()
def moderatesignups():
    """Render the moderator sign-up page as HTML.

    Collects comments from the tracked subreddit submissions, skips the
    ones already retired (keyed by content hash) or whose author account
    was deleted, and emits one HTML action form per remaining comment.

    Returns:
        flask Response containing the rendered page (mimetype text/html).
    """
    global commentHashesAndComments
    commentHashesAndComments = {}
    stringio = StringIO()
    stringio.write('<html>\n<head>\n</head>\n\n')

    # redditSession = loginAndReturnRedditSession()
    redditSession = loginOAuthAndReturnRedditSession()
    submissions = getSubmissionsForRedditSession(redditSession)
    flat_comments = getCommentsForSubmissions(submissions)
    retiredHashes = retiredCommentHashes()
    stringio.write(
        '<iframe name="invisibleiframe" style="display:none;"></iframe>\n')
    stringio.write("<h3>")
    stringio.write(os.getcwd())
    stringio.write("<br>\n")
    for submission in submissions:
        stringio.write(submission.title)
        stringio.write("<br>\n")
    stringio.write("</h3>\n\n")
    stringio.write(
        '<form action="copydisplayduringsignuptoclipboard.html" method="post" target="invisibleiframe">'
    )
    stringio.write(
        '<input type="submit" value="Copy display-during-signup.py stdout to clipboard">'
    )
    stringio.write('</form>')
    for comment in flat_comments:
        # comment.author is None when the account was deleted; such comments
        # cannot be signed up, so skip them instead of rendering the bogus
        # member name "None" (str(None)) as before.
        if comment.author is None:
            continue
        # Fingerprint the comment by permalink + body so an edited comment
        # shows up again even after the previous version was retired.
        commentHash = sha1()
        commentHash.update(comment.permalink)
        commentHash.update(comment.body.encode('utf-8'))
        commentHash = commentHash.hexdigest()
        if commentHash not in retiredHashes:
            commentHashesAndComments[commentHash] = comment
            authorName = str(comment.author)
            stringio.write("<hr>\n")
            stringio.write('<font color="blue"><b>')
            stringio.write(authorName)
            stringio.write('</b></font><br>')
            if ParticipantCollection().hasParticipantNamed(authorName):
                stringio.write(
                    ' <small><font color="green">(member)</font></small>')
                # if ParticipantCollection().participantNamed(authorName).isStillIn:
                #    stringio.write(' <small><font color="green">(in)</font></small>')
                # else:
                #    stringio.write(' <small><font color="red">(out)</font></small>')
            else:
                stringio.write(
                    ' <small><font color="red">(not a member)</font></small>')
            stringio.write(
                '<form action="takeaction.html" method="post" target="invisibleiframe">'
            )
            stringio.write(
                '<input type="submit" name="actiontotake" value="Signup" style="color:white;background-color:green">'
            )
            # stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin">')
            # stringio.write('<input type="submit" name="actiontotake" value="Relapse">')
            # stringio.write('<input type="submit" name="actiontotake" value="Reinstate">')
            stringio.write(
                '<input type="submit" name="actiontotake" value="Skip comment">'
            )
            stringio.write(
                '<input type="submit" name="actiontotake" value="Skip comment and don\'t upvote">'
            )
            stringio.write('<input type="hidden" name="username" value="' +
                           b64encode(authorName) + '">')
            stringio.write('<input type="hidden" name="commenthash" value="' +
                           commentHash + '">')
            stringio.write(
                '<input type="hidden" name="commentpermalink" value="' +
                comment.permalink + '">')
            stringio.write('</form>')

            stringio.write(
                bleach.clean(markdown.markdown(comment.body.encode('utf-8')),
                             tags=['p']))
            stringio.write("\n<br><br>\n\n")

    stringio.write('</html>')
    pageString = stringio.getvalue()
    stringio.close()
    return Response(pageString, mimetype='text/html')
예제 #36
0
 def serialize_tree(self, tree):
     """Serialize *tree* (an ElementTree) to a UTF-8 XML document string.

     ElementTree.write() skips the XML declaration for utf-8 output, so one
     is prepended by hand before returning the document text.
     """
     # Renamed from `file`, which shadowed the builtin of the same name.
     buf = StringIO()
     tree.write(buf, encoding='utf-8')
     contents = "<?xml version='1.0' encoding='utf-8'?>" + buf.getvalue()
     buf.close()
     return contents
예제 #37
0
class HTTPRequest():
    """One HTTP client connection wrapping a single ``pycurl.Curl`` handle.

    The response body is spooled into a StringIO (``self.rep``) and the raw
    header block into ``self.header``; cookies are mirrored between the
    curl handle and an optional external cookie jar (``self.cj``).
    Python 2 era code: relies on ``unicode`` and ``dict.iteritems``.
    """

    def __init__(self, cookies=None, options=None):
        # cookies: external cookie jar, or None.
        # options: dict expected to contain at least "interface", "proxies"
        # and "ipv6" (see setInterface); keys naming pycurl constants are
        # forwarded verbatim by setOptions.
        self.c = pycurl.Curl()
        self.rep = StringIO()  # response body spool, recreated per request

        self.cj = cookies  # cookie jar

        self.lastURL = None  # URL of the last request as sent (after quoting)
        self.lastEffectiveURL = None  # final URL after redirects
        self.abort = False  # set externally to cancel a running transfer
        self.code = 0  # last http code

        self.header = ""  # raw header block of the last response

        self.headers = []  # temporary request headers for the next load()

        self.initHandle()
        self.setInterface(options)
        self.setOptions(options)

        # Route body and header data through our own spooling callbacks.
        self.c.setopt(pycurl.WRITEFUNCTION, self.write)
        self.c.setopt(pycurl.HEADERFUNCTION, self.writeHeader)

        self.log = getLogger("log")

    def initHandle(self):
        """Apply baseline curl options shared by every request."""
        self.c.setopt(pycurl.FOLLOWLOCATION, 1)
        self.c.setopt(pycurl.MAXREDIRS, 5)
        self.c.setopt(pycurl.CONNECTTIMEOUT, 30)
        self.c.setopt(pycurl.NOSIGNAL, 1)
        self.c.setopt(pycurl.NOPROGRESS, 1)
        if hasattr(pycurl, "AUTOREFERER"):
            self.c.setopt(pycurl.AUTOREFERER, 1)
        # NOTE(review): TLS certificate verification is disabled globally.
        self.c.setopt(pycurl.SSL_VERIFYPEER, 0)
        # Interval for low speed, detects connection loss, but can abort dl if hoster stalls the download
        self.c.setopt(pycurl.LOW_SPEED_TIME, 45)
        self.c.setopt(pycurl.LOW_SPEED_LIMIT, 5)

        #self.c.setopt(pycurl.VERBOSE, 1)

        self.c.setopt(
            pycurl.USERAGENT,
            "Mozilla/5.0 (Windows NT 6.1; Win64; x64;en; rv:5.0) Gecko/20110619 Firefox/5.0"
        )
        # version_info()[7] is the libcurl features field; presumably used
        # here as a proxy for compression support -- TODO confirm.
        if pycurl.version_info()[7]:
            self.c.setopt(pycurl.ENCODING, "gzip, deflate")
        self.c.setopt(pycurl.HTTPHEADER, [
            "Accept: */*", "Accept-Language: en-US,en",
            "Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7",
            "Connection: keep-alive", "Keep-Alive: 300", "Expect:"
        ])

    def setInterface(self, options):
        """Apply outgoing interface, proxy and IP-version settings from *options*."""

        interface, proxy, ipv6 = options["interface"], options[
            "proxies"], options["ipv6"]

        if interface and interface.lower() != "none":
            self.c.setopt(pycurl.INTERFACE, str(interface))

        if proxy:
            if proxy["type"] == "socks4":
                self.c.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_SOCKS4)
            elif proxy["type"] == "socks5":
                self.c.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_SOCKS5)
            else:
                self.c.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_HTTP)

            self.c.setopt(pycurl.PROXY, str(proxy["address"]))
            self.c.setopt(pycurl.PROXYPORT, proxy["port"])

            if proxy["username"]:
                self.c.setopt(
                    pycurl.PROXYUSERPWD,
                    str("%s:%s" % (proxy["username"], proxy["password"])))

        if ipv6:
            # Let libcurl pick whichever address family resolves.
            self.c.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_WHATEVER)
        else:
            self.c.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4)

        if "auth" in options:
            self.c.setopt(pycurl.USERPWD, str(options["auth"]))

        if "timeout" in options:
            self.c.setopt(pycurl.LOW_SPEED_TIME, options["timeout"])

    def setOptions(self, options):
        """Forward every option whose key names a pycurl constant."""
        for k, v in options.iteritems():
            if hasattr(pycurl, k):
                self.c.setopt(getattr(pycurl, k), v)

    def addCookies(self):
        """ put cookies from curl handle to cj """
        if self.cj:
            self.cj.addCookies(self.c.getinfo(pycurl.INFO_COOKIELIST))

    def getCookies(self):
        """ add cookies from cj to curl handle """
        if self.cj:
            for c in self.cj.getCookies():
                self.c.setopt(pycurl.COOKIELIST, c)
        return

    def clearCookies(self):
        """Drop all cookies currently held by the curl handle."""
        self.c.setopt(pycurl.COOKIELIST, "")

    def setRequestContext(self,
                          url,
                          get,
                          post,
                          referer,
                          cookies,
                          multipart=False):
        """ sets everything needed for the request """

        url = myquote(url)

        if get:
            # Encode GET parameters into the query string.
            get = urlencode(get)
            url = "%s?%s" % (url, get)

        self.c.setopt(pycurl.URL, url)
        self.lastURL = url

        if post:
            self.c.setopt(pycurl.POST, 1)
            if not multipart:
                if type(post) == unicode:
                    post = str(post)  #unicode not allowed
                elif type(post) == str:
                    pass
                else:
                    post = myurlencode(post)

                self.c.setopt(pycurl.POSTFIELDS, post)
            else:
                # Multipart upload: pycurl expects a list of (field, value)
                # pairs with byte-string values.
                post = [(x, y.encode('utf8') if type(y) == unicode else y)
                        for x, y in post.iteritems()]
                self.c.setopt(pycurl.HTTPPOST, post)
        else:
            self.c.setopt(pycurl.POST, 0)

        if referer and self.lastURL:
            self.c.setopt(pycurl.REFERER, str(self.lastURL))

        if cookies:
            # Enable the in-memory cookie engine, then seed it from the jar.
            self.c.setopt(pycurl.COOKIEFILE, "")
            self.c.setopt(pycurl.COOKIEJAR, "")
            self.getCookies()

    def load(self,
             url,
             get={},
             post={},
             referer=True,
             cookies=True,
             just_header=False,
             multipart=False,
             decode=False):
        """ load and returns a given page """
        # NOTE(review): the {} defaults are shared mutable defaults; they are
        # only read here, but callers must not mutate them.

        self.setRequestContext(url, get, post, referer, cookies, multipart)

        # TODO: use http/rfc message instead
        self.header = ""

        self.c.setopt(pycurl.HTTPHEADER, self.headers)

        if just_header:
            # Header-only request: suppress redirects and the response body.
            self.c.setopt(pycurl.FOLLOWLOCATION, 0)
            self.c.setopt(pycurl.NOBODY, 1)  #TODO: nobody= no post?

            # overwrite HEAD request, we want a common request type
            if post:
                self.c.setopt(pycurl.CUSTOMREQUEST, "POST")
            else:
                self.c.setopt(pycurl.CUSTOMREQUEST, "GET")

            try:
                self.c.perform()
                rep = self.header
            finally:
                # Restore the handle defaults even if perform() raised.
                self.c.setopt(pycurl.FOLLOWLOCATION, 1)
                self.c.setopt(pycurl.NOBODY, 0)
                self.c.unsetopt(pycurl.CUSTOMREQUEST)

        else:
            self.c.perform()
            rep = self.getResponse()

        self.c.setopt(pycurl.POSTFIELDS, "")
        self.lastEffectiveURL = self.c.getinfo(pycurl.EFFECTIVE_URL)
        self.code = self.verifyHeader()

        self.addCookies()

        if decode:
            rep = self.decodeResponse(rep)

        return rep

    def verifyHeader(self):
        """Return the HTTP status code; raise BadHeader if it is blacklisted."""
        code = int(self.c.getinfo(pycurl.RESPONSE_CODE))
        # TODO: raise anyway to be consistent, also rename exception
        if code in bad_headers:
            #404 will NOT raise an exception
            raise BadHeader(code, self.getResponse())
        return code

    def checkHeader(self):
        """ check if header indicates failure"""
        return int(self.c.getinfo(pycurl.RESPONSE_CODE)) not in bad_headers

    def getResponse(self):
        """ retrieve response from string io """
        if self.rep is None: return ""
        value = self.rep.getvalue()
        self.rep.close()
        # Start a fresh spool for the next request.
        self.rep = StringIO()
        return value

    def decodeResponse(self, rep):
        """ decode with correct encoding, relies on header """
        header = self.header.splitlines()
        encoding = "utf8"  # default encoding

        for line in header:
            line = line.lower().replace(" ", "")
            # Only Content-Type headers of text/application payloads may
            # carry a charset worth honouring.
            if not line.startswith("content-type:") or\
               ("text" not in line and "application" not in line):
                continue

            none, delemiter, charset = line.rpartition("charset=")
            if delemiter:
                charset = charset.split(";")
                if charset:
                    encoding = charset[0]

        try:
            #self.log.debug("Decoded %s" % encoding )
            # Strip a UTF-8 BOM by switching to the -sig codec.
            if lookup(encoding).name == 'utf-8' and rep.startswith(BOM_UTF8):
                encoding = 'utf-8-sig'

            decoder = getincrementaldecoder(encoding)("replace")
            rep = decoder.decode(rep, True)

            #TODO: html_unescape as default

        except LookupError:
            self.log.debug("No Decoder found for %s" % encoding)
        except Exception:
            self.log.debug("Error when decoding string from %s." % encoding)

        return rep

    def write(self, buf):
        """ writes response """
        # Responses over ~1 MB are dumped to disk and the transfer aborted;
        # this is also where an external abort request is honoured.
        if self.rep.tell() > 1000000 or self.abort:
            rep = self.getResponse()
            if self.abort: raise Abort()
            f = open("response.dump", "wb")
            f.write(rep)
            f.close()
            raise Exception("Loaded Url exceeded limit")

        self.rep.write(buf)

    def writeHeader(self, buf):
        """ writes header """
        self.header += buf

    def putHeader(self, name, value):
        """Queue an extra request header for subsequent load() calls."""
        self.headers.append("%s: %s" % (name, value))

    def clearHeaders(self):
        """Forget all extra request headers."""
        self.headers = []

    def close(self):
        """ cleanup, unusable after this """
        self.rep.close()
        if hasattr(self, "cj"):
            del self.cj
        if hasattr(self, "c"):
            self.c.close()
            del self.c
예제 #38
0
def run_test(mytest, test_config=TestConfig(), context=None):
    """ Put together test pieces: configure & run actual test, return results """
    # NOTE(review): the default test_config=TestConfig() is evaluated once at
    # import time and shared across calls; safe only if it is never mutated
    # -- confirm against TestConfig's definition.

    # Initialize a context if not supplied
    my_context = context
    if my_context is None:
        my_context = Context()

    # Resolve templates against the context, then build the curl handle.
    mytest.update_context_before(my_context)
    templated_test = mytest.realize(my_context)
    curl = templated_test.configure_curl(timeout=test_config.timeout,
                                         context=my_context)
    result = TestResponse()
    result.test = templated_test

    # reset the body, it holds values from previous runs otherwise
    headers = MyIO()
    body = MyIO()
    curl.setopt(pycurl.WRITEFUNCTION, body.write)
    curl.setopt(pycurl.HEADERFUNCTION, headers.write)
    if test_config.verbose:
        curl.setopt(pycurl.VERBOSE, True)
    if test_config.ssl_insecure:
        curl.setopt(pycurl.SSL_VERIFYPEER, 0)
        curl.setopt(pycurl.SSL_VERIFYHOST, 0)

    result.passed = None

    # Interactive mode: show the request and wait for operator confirmation.
    if test_config.interactive:
        print("===================================")
        print("%s" % mytest.name)
        print("-----------------------------------")
        print("REQUEST:")
        print("%s %s" % (templated_test.method, templated_test.url))
        print("HEADERS:")
        print("%s" % (templated_test.headers))
        if mytest.body is not None:
            print("\n%s" % templated_test.body)
        raw_input("Press ENTER when ready (%d): " % (mytest.delay))

    if mytest.delay > 0:
        print("Delaying for %ds" % mytest.delay)
        time.sleep(mytest.delay)

    try:
        curl.perform()  # Run the actual call
    except Exception as e:
        # Curl exception occurred (network error), do not pass go, do not
        # collect $200
        trace = traceback.format_exc()
        result.failures.append(
            Failure(message="Curl Exception: {0}".format(e),
                    details=trace,
                    failure_type=validators.FAILURE_CURL_EXCEPTION))
        result.passed = False
        curl.close()
        return result

    # Retrieve values
    result.body = body.getvalue()
    body.close()
    result.response_headers = text_type(headers.getvalue(),
                                        HEADER_ENCODING)  # Per RFC 2616
    headers.close()

    response_code = curl.getinfo(pycurl.RESPONSE_CODE)
    result.response_code = response_code

    logger.debug("Initial Test Result, based on expected response code: " +
                 str(response_code in mytest.expected_status))

    if response_code in mytest.expected_status:
        result.passed = True
    else:
        # Invalid response code
        result.passed = False
        failure_message = "Invalid HTTP response code: response code {0} not in expected codes [{1}]".format(
            response_code, mytest.expected_status)
        result.failures.append(
            Failure(message=failure_message,
                    details=None,
                    failure_type=validators.FAILURE_INVALID_RESPONSE))

    # Parse HTTP headers
    try:
        result.response_headers = parse_headers(result.response_headers)
    except Exception as e:
        trace = traceback.format_exc()
        result.failures.append(
            Failure(message="Header parsing exception: {0}".format(e),
                    details=trace,
                    failure_type=validators.FAILURE_TEST_EXCEPTION))
        result.passed = False
        curl.close()
        return result

    # print str(test_config.print_bodies) + ',' + str(not result.passed) + ' ,
    # ' + str(test_config.print_bodies or not result.passed)

    head = result.response_headers

    # execute validator on body
    # (only runs if the status code check above already passed)
    if result.passed is True:
        body = result.body
        if mytest.validators is not None and isinstance(
                mytest.validators, list):
            logger.debug("executing this many validators: " +
                         str(len(mytest.validators)))
            failures = result.failures
            for validator in mytest.validators:
                validate_result = validator.validate(body=body,
                                                     headers=head,
                                                     context=my_context)
                if not validate_result:
                    result.passed = False
                # Proxy for checking if it is a Failure object, because of
                # import issues with isinstance there
                if hasattr(validate_result, 'details'):
                    failures.append(validate_result)
                # TODO add printing of validation for interactive mode
        else:
            logger.debug("no validators found")

        # Only do context updates if test was successful
        mytest.update_context_after(result.body, head, my_context)

    # Print response body if override is set to print all *OR* if test failed
    # (to capture maybe a stack trace)
    if test_config.print_bodies or not result.passed:
        if test_config.interactive:
            print("RESPONSE:")
        print(result.body.decode(ESCAPE_DECODING))

    if test_config.print_headers or not result.passed:
        if test_config.interactive:
            print("RESPONSE HEADERS:")
        print(result.response_headers)

    # TODO add string escape on body output
    logger.debug(result)

    curl.close()
    return result
예제 #39
0
class WSCOC:
    "Interfaz para el WebService de Consulta de Operaciones Cambiarias"
    _public_methods_ = ['GenerarSolicitudCompraDivisa',
                        'GenerarSolicitudCompraDivisaTurExt',
                        'InformarSolicitudCompraDivisa',
                        'ConsultarCUIT',
                        'ConsultarCOC',
                        'AnularCOC',
                        'ConsultarSolicitudCompraDivisa',
                        'ConsultarSolicitudesCompraDivisas',
                        'ConsultarDestinosCompra',
                        'ConsultarTiposReferencia',
                        'ConsultarMonedas',
                        'ConsultarTiposDocumento',
                        'ConsultarTiposEstadoSolicitud',
                        'ConsultarMotivosExcepcionDJAI',
                        'ConsultarDestinosCompraDJAI',
                        'ConsultarMotivosExcepcionDJAS',
                        'ConsultarDestinosCompraDJAS',
                        'ConsultarDestinosCompraTipoReferencia',
                        'LeerSolicitudConsultada', 'LeerCUITConsultado',
                        'ConsultarDJAI', 'ConsultarDJAS', 'ConsultarReferencia',
                        'LeerError', 'LeerErrorFormato', 'LeerInconsistencia',
                        'LoadTestXML',
                        'AnalizarXml', 'ObtenerTagXml',
                        'Dummy', 'Conectar', 'DebugLog']
    _public_attrs_ = ['Token', 'Sign', 'Cuit',
        'AppServerStatus', 'DbServerStatus', 'AuthServerStatus',
        'XmlRequest', 'XmlResponse', 'Version', 'InstallDir', 
        'Resultado', 'Inconsistencias', 'ErrCode', 'ErrMsg',
        'CodigoSolicitud', 'FechaSolicitud', 'EstadoSolicitud', 'FechaEstado',
        'COC', 'FechaEmisionCOC', 'CodigoDestino',
        'CUITComprador', 'DenominacionComprador',
        'CodigoMoneda', 'CotizacionMoneda', 'MontoPesos',
        'CUITRepresentante', 'DenominacionRepresentante',
        'TipoDoc', 'NumeroDoc', 'CUITConsultada', 'DenominacionConsultada',
		'DJAI', 'CodigoExcepcionDJAI', 'DJAS', 'CodigoExcepcionDJAS',
        'MontoFOB', 'EstadoDJAI', 'EstadoDJAS', 'Estado', 'Tipo', 'Codigo',
        'ErroresFormato', 'Errores', 'Traceback', 'Excepcion', 'LanzarExcepciones',
        ]

    _reg_progid_ = "WSCOC"
    _reg_clsid_ = "{B30406CE-326A-46D9-B807-B7916E3F1B96}"

    Version = "%s %s %s" % (__version__, H**O and 'Homologación' or '', pysimplesoap.client.__file__)
    LanzarExcepciones = False

    def __init__(self):
        self.Token = self.Sign = self.Cuit = None
        self.AppServerStatus = None
        self.DbServerStatus = None
        self.AuthServerStatus = None
        self.XmlRequest = ''
        self.XmlResponse = ''
        self.Resultado = self.Motivo = self.Reproceso = ''
        self.__analizar_solicitud({})
        self.__analizar_inconsistencias({})
        self.__analizar_errores({})
        self.__detalles_solicitudes = None
        self.__detalles_cuit = None
        self.client = None
        self.ErrCode = self.ErrMsg = self.Traceback = self.Excepcion = ""
        self.EmisionTipo = ''
        self.Reprocesar = self.Reproceso = ''  # no implementado
        self.Log = None
        self.InstallDir = INSTALL_DIR

    def __analizar_errores(self, ret):
        "Comprueba y extrae errores si existen en la respuesta XML"
        self.Errores = []
        self.ErroresFormato = []
        if 'arrayErrores' in ret:
            errores = ret['arrayErrores']
            for error in errores:
                self.Errores.append("%s: %s" % (
                    error['codigoDescripcion']['codigo'],
                    error['codigoDescripcion']['descripcion'],
                    ))
        if 'arrayErroresFormato' in ret:
            errores = ret['arrayErroresFormato']
            for error in errores:
                self.ErroresFormato.append("%s: %s" % (
                    error['codigoDescripcionString']['codigo'],
                    error['codigoDescripcionString']['descripcion'],
                    ))

    def __analizar_solicitud(self, det):
        "Analiza y extrae los datos de una solicitud"
        self.CodigoSolicitud = det.get("codigoSolicitud")
        self.FechaSolicitud = det.get("fechaSolicitud")
        self.COC = str(det.get("coc"))
        self.FechaEmisionCOC = det.get("fechaEmisionCOC")
        self.EstadoSolicitud = det.get("estadoSolicitud")
        self.FechaEstado = det.get("fechaEstado")
        self.CUITComprador = str(det.get("detalleCUITComprador", 
                                     {}).get("cuit", ""))
        self.DenominacionComprador = det.get("detalleCUITComprador", 
                                             {}).get("denominacion")
        self.CodigoMoneda = det.get("codigoMoneda")
        self.CotizacionMoneda = det.get("cotizacionMoneda")
        self.MontoPesos = det.get("montoPesos")
        self.CUITRepresentante = str(det.get("DetalleCUITRepresentante", 
                                         {}).get("cuit", ""))
        self.DenominacionRepresentante = det.get("DetalleCUITRepresentante", 
                                                 {}).get("denominacion")
        self.CodigoDestino = det.get("codigoDestino")
        self.DJAI = det.get("djai")
        self.CodigoExcepcionDJAI = det.get("codigoExcepcionDJAI")
        self.DJAS = det.get("djas")
        self.CodigoExcepcionDJAS = det.get("codigoExcepcionDJAS")
        ref = det.get("referencia")
        if ref:
            self.Tipo = ref['tipo']
            self.Codigo = ref['codigo']

    def __analizar_inconsistencias(self, ret):
        "Comprueba y extrae (formatea) las inconsistencias"
        self.Inconsistencias = []
        if 'arrayInconsistencias' in ret:
            inconsistencias = ret['arrayInconsistencias']
            for inconsistencia in inconsistencias:
                self.Inconsistencias.append("%s: %s" % (
                    inconsistencia['codigoDescripcion']['codigo'],
                    inconsistencia['codigoDescripcion']['descripcion'],
                    ))

    def __log(self, msg):
        if not isinstance(msg, unicode):
            msg = unicode(msg, 'utf8', 'ignore')
        if not self.Log:
            self.Log = StringIO()
        self.Log.write(msg)
        self.Log.write('\n\r')
    
    def DebugLog(self):
        "Devolver y limpiar la bitácora de depuración"
        if self.Log:
            msg = self.Log.getvalue()
            # limpiar log
            self.Log.close()
            self.Log = None
        else:
            msg = u''
        return msg    

    @inicializar_y_capturar_excepciones
    def Conectar(self, cache=None, wsdl=None, proxy="", wrapper=None, cacert=None, timeout=None):
        # cliente soap del web service
        if timeout:
            self.__log("Estableciendo timeout=%s" % (timeout, ))
            socket.setdefaulttimeout(timeout)
        if wrapper:
            Http = set_http_wrapper(wrapper)
            self.Version = WSCOC.Version + " " + Http._wrapper_version
        proxy_dict = parse_proxy(proxy)
        location = LOCATION
        if H**O or not wsdl:
            wsdl = WSDL
        elif not wsdl.endswith("?wsdl") and wsdl.startswith("http"):
            location = wsdl
            wsdl += "?wsdl"
        elif wsdl.endswith("?wsdl"):
            location = wsdl[:-5]
        if not cache or H**O:
            # use 'cache' from installation base directory 
            cache = os.path.join(self.InstallDir, 'cache')
        self.__log("Conectando a wsdl=%s cache=%s proxy=%s" % (wsdl, cache, proxy_dict))
        self.client = SoapClient(
            wsdl = wsdl,        
            cache = cache,
            proxy = proxy_dict,
            ns="coc",
            cacert=cacert,
            soap_ns="soapenv",
            soap_server="jbossas6",
            trace = "--trace" in sys.argv)
        # corrijo ubicación del servidor (http en el WSDL)
        self.client.services['COCService']['ports']['COCServiceHttpSoap11Endpoint']['location'] = location
        self.__log("Corrigiendo location=%s" % (location, ))
        return True

    @inicializar_y_capturar_excepciones
    def Dummy(self):
        "Obtener el estado de los servidores de la AFIP"
        result = self.client.dummy()
        ret = result['dummyReturn']
        self.AppServerStatus = ret['appserver']
        self.DbServerStatus = ret['dbserver']
        self.AuthServerStatus = ret['authserver']
        return True
    
    @inicializar_y_capturar_excepciones
    def GenerarSolicitudCompraDivisa(self, cuit_comprador, codigo_moneda,
                                     cotizacion_moneda, monto_pesos,
                                    cuit_representante, codigo_destino,
									djai=None, codigo_excepcion_djai=None,
                                    djas=None, codigo_excepcion_djas=None,
                                    tipo=None, codigo=None,
                                    ):
        "Generar una Solicitud de operación cambiaria"
        if tipo and codigo:
            referencia = {'tipo': tipo, 'codigo': codigo}
        else:
            referencia = None
        res = self.client.generarSolicitudCompraDivisa(
            authRequest={'token': self.Token, 'sign': self.Sign, 'cuitRepresentada': self.Cuit},
            cuitComprador=cuit_comprador,
            codigoMoneda=codigo_moneda,
            cotizacionMoneda=cotizacion_moneda,
            montoPesos=monto_pesos,
            cuitRepresentante=cuit_representante,
            codigoDestino=codigo_destino,
			djai=djai, codigoExcepcionDJAI=codigo_excepcion_djai,
            djas=djas, codigoExcepcionDJAS=codigo_excepcion_djas,
            referencia=referencia,
            )

        self.Resultado = ""
        ret = res.get('generarSolicitudCompraDivisaReturn', {})
        self.Resultado = ret.get('resultado')
        det = ret.get('detalleSolicitud', {})
        self.__analizar_solicitud(det)
        self.__analizar_inconsistencias(det)
        self.__analizar_errores(ret)

        return True

    @inicializar_y_capturar_excepciones
    def GenerarSolicitudCompraDivisaTurExt(self, tipo_doc, numero_doc, apellido_nombre,
                                    codigo_moneda, cotizacion_moneda, monto_pesos,
                                    cuit_representante, codigo_destino,
                                    ):
        "Generar una Solicitud de operación cambiaria"
        res = self.client.generarSolicitudCompraDivisaTurExt(
            authRequest={'token': self.Token, 'sign': self.Sign, 'cuitRepresentada': self.Cuit},
            detalleTurExtComprador={
                'tipoNumeroDoc': {'tipoDoc': tipo_doc, 'numeroDoc': numero_doc},
                'apellidoNombre': apellido_nombre},
            codigoMoneda=codigo_moneda,
            cotizacionMoneda=cotizacion_moneda,
            montoPesos=monto_pesos,
            cuitRepresentante=cuit_representante,
            codigoDestino=codigo_destino,
            )

        self.Resultado = ""
        ret = res.get('generarSolicitudCompraDivisaTurExtReturn', {})
        self.Resultado = ret.get('resultado')
        det = ret.get('detalleSolicitud', {})
        self.__analizar_solicitud(det)
        self.__analizar_inconsistencias(det)
        self.__analizar_errores(ret)
        
        return True

    @inicializar_y_capturar_excepciones
    def InformarSolicitudCompraDivisa(self, codigo_solicitud, nuevo_estado):
        "Informar la aceptación o desistir una solicitud generada con anterioridad"

        res = self.client.informarSolicitudCompraDivisa(
            authRequest={'token': self.Token, 'sign': self.Sign, 'cuitRepresentada': self.Cuit},
            codigoSolicitud=codigo_solicitud,
            nuevoEstado=nuevo_estado,
        )

        self.Resultado = ""
        ret = res.get('informarSolicitudCompraDivisaReturn', {})
        self.Resultado = ret.get('resultado')
        self.__analizar_solicitud(ret)
        self.__analizar_errores(ret)
        return True


    @inicializar_y_capturar_excepciones
    def ConsultarCUIT(self, numero_doc, tipo_doc=80, sep="|"):
        "Look up the CUIT, CDI or CUIL matching a given document type/number."
        response = self.client.consultarCUIT(
            authRequest={'token': self.Token, 'sign': self.Sign,
                         'cuitRepresentada': self.Cuit},
            tipoNumeroDoc={'tipoDoc': tipo_doc, 'numeroDoc': numero_doc}
        )

        self.__detalles_cuit = []

        if 'consultarCUITReturn' not in response:
            self.TipoDoc = None
            self.NumeroDoc = None
            return [""]

        payload = response['consultarCUITReturn']
        self.__analizar_errores(payload)
        if 'tipoNumeroDoc' not in payload:
            return []

        documento = payload['tipoNumeroDoc']
        self.TipoDoc = documento['tipoDoc']
        self.NumeroDoc = documento['numeroDoc']
        resultado = []
        for detalle in payload.get('arrayDetallesCUIT', []):
            det = detalle['detalleCUIT']
            # keep each detail so LeerCUITConsultado can walk them later
            self.__detalles_cuit.append(det)
            # produce one "cuit<sep>denominacion" string per detail
            resultado.append(
                (u"%(cuit)s\t%(denominacion)s" % det).replace("\t", sep))
        return resultado

    def LeerCUITConsultado(self):
        "Step through the CUIT details returned by ConsultarCUIT."
        if not self.__detalles_cuit:
            return False
        # consume the oldest entry and expose it via the public attributes
        detalle = self.__detalles_cuit.pop(0)
        self.CUITConsultada = str(detalle['cuit'])
        self.DenominacionConsultada = str(detalle['denominacion'])
        return True

    @inicializar_y_capturar_excepciones
    def ConsultarCOC(self, coc):
        "Fetch the data of an existing COC."
        response = self.client.consultarCOC(
            authRequest={'token': self.Token, 'sign': self.Sign,
                         'cuitRepresentada': self.Cuit},
            coc=coc,
        )
        payload = response.get('consultarCOCReturn', {})
        detalle = payload.get('detalleSolicitud', {})
        self.__analizar_solicitud(detalle)
        self.__analizar_inconsistencias(detalle)
        self.__analizar_errores(payload)
        return True

    @inicializar_y_capturar_excepciones
    def AnularCOC(self, coc, cuit_comprador):
        "Cancel an existing COC (CO state, 24hs window)."
        response = self.client.anularCOC(
            authRequest={'token': self.Token, 'sign': self.Sign,
                         'cuitRepresentada': self.Cuit},
            coc=coc,
            cuitComprador=cuit_comprador,
        )
        payload = response.get('anularCOCReturn', {})
        self.__analizar_solicitud(payload)
        self.__analizar_inconsistencias(payload)
        self.__analizar_errores(payload)
        return True


    @inicializar_y_capturar_excepciones
    def ConsultarSolicitudCompraDivisa(self, codigo_solicitud):
        "Look up a single foreign-currency purchase request by its code."
        response = self.client.consultarSolicitudCompraDivisa(
            authRequest={'token': self.Token, 'sign': self.Sign,
                         'cuitRepresentada': self.Cuit},
            codigoSolicitud=codigo_solicitud,
        )
        payload = response.get('consultarSolicitudCompraDivisaReturn', {})
        detalle = payload.get('detalleSolicitud', {})
        self.__analizar_solicitud(detalle)
        self.__analizar_errores(payload)
        return True

    @inicializar_y_capturar_excepciones
    def ConsultarSolicitudesCompraDivisas(self, cuit_comprador,
                                          estado_solicitud,
                                          fecha_emision_desde,
                                          fecha_emision_hasta,
                                          ):
        "Query currency purchase requests; return their request codes."
        response = self.client.consultarSolicitudesCompraDivisas(
            authRequest={'token': self.Token, 'sign': self.Sign,
                         'cuitRepresentada': self.Cuit},
            cuitComprador=cuit_comprador,
            estadoSolicitud=estado_solicitud,
            fechaEmisionDesde=fecha_emision_desde,
            fechaEmisionHasta=fecha_emision_hasta,
        )
        self.__analizar_errores(response)

        payload = response.get('consultarSolicitudesCompraDivisasReturn', {})
        # details are retained so LeerSolicitudConsultada can iterate them
        self.__detalles_solicitudes = []
        codigos = []
        for entrada in payload.get('arrayDetallesSolicitudes', []):
            detalle = entrada['detalleSolicitudes']
            self.__detalles_solicitudes.append(detalle)
            codigos.append(detalle.get("codigoSolicitud"))
        return codigos


    def LeerSolicitudConsultada(self):
        "Process one request detail saved by ConsultarSolicitudesCompraDivisas."
        if not self.__detalles_solicitudes:
            return False
        # consume details front-to-back, analyzing each one
        detalle = self.__detalles_solicitudes.pop(0)
        self.__analizar_solicitud(detalle)
        self.__analizar_errores(detalle)
        self.__analizar_inconsistencias(detalle)
        return True

    @inicializar_y_capturar_excepciones
    def ConsultarDJAI(self, djai, cuit):
        "Query an Advance Sworn Import Declaration (DJAI)."
        response = self.client.consultarDJAI(
            authRequest={'token': self.Token, 'sign': self.Sign,
                         'cuitRepresentada': self.Cuit},
            djai=djai, cuit=cuit,
        )
        payload = response.get('consultarDJAIReturn', {})
        self.__analizar_errores(payload)
        # expose the relevant response fields as public attributes
        self.DJAI = payload.get('djai')
        self.MontoFOB = payload.get('montoFOB')
        self.CodigoMoneda = payload.get('codigoMoneda')
        self.EstadoDJAI = payload.get('estadoDJAI')
        return True

    @inicializar_y_capturar_excepciones
    def ConsultarDJAS(self, djas, cuit):
        "Query an Advance Sworn Services Declaration (DJAS)."
        response = self.client.consultarDJAS(
            authRequest={'token': self.Token, 'sign': self.Sign,
                         'cuitRepresentada': self.Cuit},
            djas=djas, cuit=cuit,
        )
        payload = response.get('consultarDJASReturn', {})
        self.__analizar_errores(payload)
        # expose the relevant response fields as public attributes
        self.DJAS = payload.get('djas')
        self.MontoFOB = payload.get('montoFOB')
        self.CodigoMoneda = payload.get('codigoMoneda')
        self.EstadoDJAS = payload.get('estadoDJAS')
        return True

    @inicializar_y_capturar_excepciones
    def ConsultarReferencia(self, tipo, codigo):
        "Look up a reference by its type (1: DJAI, 2: DJAS, 3: DJAT) and code."
        response = self.client.consultarReferencia(
            authRequest={'token': self.Token, 'sign': self.Sign,
                         'cuitRepresentada': self.Cuit},
            referencia={'tipo': tipo, 'codigo': codigo},
        )
        payload = response.get('consultarReferenciaReturn', {})
        self.__analizar_errores(payload)
        # payload also carries 'codigo', which was deliberately left unexposed
        self.MontoFOB = payload.get('monto')
        self.CodigoMoneda = payload.get('codigoMoneda')
        self.Estado = payload.get('estado')
        return True
        
    @inicializar_y_capturar_excepciones
    def ConsultarMonedas(self, sep="|"):
        "List every currency available in this WS as 'code<sep>description'."
        response = self.client.consultarMonedas(
            authRequest={'token': self.Token, 'sign': self.Sign,
                         'cuitRepresentada': self.Cuit},
        )
        filas = []
        for item in response['consultarMonedasReturn']['arrayMonedas']:
            texto = "%(codigo)s\t%(descripcion)s" % item['codigoDescripcion']
            filas.append(texto.replace("\t", sep))
        return filas

    @inicializar_y_capturar_excepciones
    def ConsultarDestinosCompra(self, sep="|"):
        "List purchase destination types as 'tipo<sep>codigo<sep>descripcion'."
        response = self.client.consultarDestinosCompra(
            authRequest={'token': self.Token, 'sign': self.Sign,
                         'cuitRepresentada': self.Cuit},
        )
        # Response layout: arrayDestinos -> destinos {tipoDestino,
        #   arrayCodigosDescripciones -> codigoDescripcion {codigo, descripcion}}
        payload = response['consultarDestinosCompraReturn']
        filas = []
        for entrada in payload['arrayDestinos']:
            destino = entrada['destinos']
            tipo = destino['tipoDestino']
            for cd in destino['arrayCodigosDescripciones']:
                texto = "%s\t%s\t%s" % (tipo,
                                        cd['codigoDescripcion']['codigo'],
                                        cd['codigoDescripcion']['descripcion'],
                                        )
                filas.append(texto.replace("\t", sep))
        return filas

    @inicializar_y_capturar_excepciones
    def ConsultarTiposDocumento(self, sep="|"):
        "List document types as 'code<sep>description' strings."
        response = self.client.consultarTiposDocumento(
            authRequest={'token': self.Token, 'sign': self.Sign,
                         'cuitRepresentada': self.Cuit},
        )
        payload = response['consultarTiposDocumentoReturn']
        filas = []
        for item in payload['arrayTiposDocumento']:
            texto = "%(codigo)s\t%(descripcion)s" % item['codigoDescripcion']
            filas.append(texto.replace("\t", sep))
        return filas

    @inicializar_y_capturar_excepciones
    def ConsultarTiposEstadoSolicitud(self, sep="|"):
        "List the states a request can be in, as 'code<sep>description'."
        response = self.client.consultarTiposEstadoSolicitud(
            authRequest={'token': self.Token, 'sign': self.Sign,
                         'cuitRepresentada': self.Cuit},
        )
        payload = response['consultarTiposEstadoSolicitudReturn']
        filas = []
        # note: this service wraps items in 'codigoDescripcionString'
        for item in payload['arrayTiposEstadoSolicitud']:
            texto = "%(codigo)s\t%(descripcion)s" % item['codigoDescripcionString']
            filas.append(texto.replace("\t", sep))
        return filas

    @inicializar_y_capturar_excepciones
    def ConsultarMotivosExcepcionDJAI(self, sep='|'):
        "List DJAI exception motives as 'code<sep>description' strings."
        response = self.client.consultarMotivosExcepcionDJAI(
            authRequest={'token': self.Token, 'sign': self.Sign,
                         'cuitRepresentada': self.Cuit},
        )
        payload = response['consultarMotivosExcepcionDJAIReturn']
        filas = []
        for item in payload['arrayMotivosExcepcion']:
            texto = "%(codigo)s\t%(descripcion)s" % item['codigoDescripcion']
            filas.append(texto.replace("\t", sep))
        return filas

    @inicializar_y_capturar_excepciones
    def ConsultarDestinosCompraDJAI(self, sep='|'):
        "List purchase destinations covered by DJAI rules, as 'code<sep>description'."
        response = self.client.consultarDestinosCompraDJAI(
            authRequest={'token': self.Token, 'sign': self.Sign,
                         'cuitRepresentada': self.Cuit},
        )
        payload = response['consultarDestinosCompraDJAIReturn']
        filas = []
        for item in payload['arrayCodigosDescripciones']:
            texto = "%(codigo)s\t%(descripcion)s" % item['codigoDescripcion']
            filas.append(texto.replace("\t", sep))
        return filas

    @inicializar_y_capturar_excepciones
    def ConsultarMotivosExcepcionDJAS(self, sep='|'):
        "List DJAS exception motives as 'code<sep>description' strings."
        response = self.client.consultarMotivosExcepcionDJAS(
            authRequest={'token': self.Token, 'sign': self.Sign,
                         'cuitRepresentada': self.Cuit},
        )
        payload = response['consultarMotivosExcepcionDJASReturn']
        filas = []
        for item in payload['arrayMotivosExcepcion']:
            texto = "%(codigo)s\t%(descripcion)s" % item['codigoDescripcion']
            filas.append(texto.replace("\t", sep))
        return filas

    @inicializar_y_capturar_excepciones
    def ConsultarDestinosCompraDJAS(self, sep='|'):
        "List purchase destinations covered by DJAS rules, as 'code<sep>description'."
        response = self.client.consultarDestinosCompraDJAS(
            authRequest={'token': self.Token, 'sign': self.Sign,
                         'cuitRepresentada': self.Cuit},
        )
        payload = response['consultarDestinosCompraDJASReturn']
        filas = []
        for item in payload['arrayCodigosDescripciones']:
            texto = "%(codigo)s\t%(descripcion)s" % item['codigoDescripcion']
            filas.append(texto.replace("\t", sep))
        return filas

    @inicializar_y_capturar_excepciones
    def ConsultarTiposReferencia(self, sep='|'):
        "List reference types usable when generating a purchase request."
        response = self.client.consultarTiposReferencia(
            authRequest={'token': self.Token, 'sign': self.Sign,
                         'cuitRepresentada': self.Cuit},
        )
        payload = response['consultarTiposReferenciaReturn']
        filas = []
        for item in payload['arrayCodigosDescripciones']:
            texto = "%(codigo)s\t%(descripcion)s" % item['codigoDescripcion']
            filas.append(texto.replace("\t", sep))
        return filas
                 
    @inicializar_y_capturar_excepciones
    def ConsultarDestinosCompraTipoReferencia(self, tipo, sep='|'):
        "List purchase destinations reachable for a given reference type."
        response = self.client.consultarDestinosCompraTipoReferencia(
            authRequest={'token': self.Token, 'sign': self.Sign,
                         'cuitRepresentada': self.Cuit},
            tipo=tipo,
        )
        payload = response['consultarDestinosCompraTipoReferenciaReturn']
        filas = []
        for item in payload['arrayCodigosDescripciones']:
            texto = "%(codigo)s\t%(descripcion)s" % item['codigoDescripcion']
            filas.append(texto.replace("\t", sep))
        return filas

    def LeerError(self):
        "Pop and return the first pending error message, or '' if none remain."
        if not self.Errores:
            return ""
        # consume errors front-to-back so repeated calls walk the list
        return self.Errores.pop(0)

    def LeerErrorFormato(self):
        "Pop and return the first pending format error, or '' if none remain."
        if not self.ErroresFormato:
            return ""
        # consume format errors front-to-back
        return self.ErroresFormato.pop(0)

    def LeerInconsistencia(self):
        "Pop and return the first pending inconsistency, or '' if none remain."
        if not self.Inconsistencias:
            return ""
        # consume inconsistencies front-to-back
        return self.Inconsistencias.pop(0)

    def LoadTestXML(self, xml_file):
        """Replace the SOAP client transport with a stub that replays xml_file.

        Used for offline testing: every subsequent request returns the
        canned XML response read from *xml_file*.
        """
        class DummyHTTP:
            "Minimal transport: always answers with the stored XML response."
            def __init__(self, xml_response):
                self.xml_response = xml_response
            def request(self, location, method, body, headers):
                # mimic an httplib2-style (response_headers, content) pair
                return {}, self.xml_response
        # read via a context manager so the file handle is always closed
        # (the previous open(...).read() leaked the handle on non-refcounting
        # interpreters / on read errors)
        with open(xml_file) as fh:
            self.client.http = DummyHTTP(fh.read())

    def AnalizarXml(self, xml=""):
        """Parse an XML message into self.xml (the last response by default).

        *xml* may be a literal XML string, 'XmlResponse' (default) or
        'XmlRequest'. Returns True on success; on failure stores the error
        text in self.Excepcion and returns False.
        """
        try:
            if not xml or xml == 'XmlResponse':
                xml = self.XmlResponse
            elif xml == 'XmlRequest':
                xml = self.XmlRequest
            self.xml = SimpleXMLElement(xml)
            return True
        # `except Exception as e` replaces the legacy Python-2-only
        # `except Exception, e` form (the `as` form works on 2.6+ and 3.x)
        except Exception as e:
            self.Excepcion = u"%s" % (e,)
            return False
예제 #40
0
def outputPython(specification, options, pyFile):
    """
    Outputs Python struct file.

    Given the specification construct a valid Python struct
    file that describes all the binary packets.

    Args:
        specification (dict): The specification object.
        options (dict):       A dictionary of options to
                              modify output.
        pyFile (file):        A file-like object to which
                              to save the struct code.
    """
    assert isinstance(specification, dict)
    assert isinstance(options, dict)
    assert hasattr(pyFile, 'write')
    packetLengths = {}
    writeOut(pyFile, '#!/usr/bin/env python')
    writeOut(pyFile, '# -*- coding: utf-8 -*-')
    writeOut(pyFile, '"""')
    writeOut(pyFile, specification['title'])
    if 'description' in specification:
        writeOut(pyFile, '')
        writeOutBlock(pyFile, specification['description'])
    for tag in ('version', 'date', 'author', 'documentation', 'metadata'):
        if tag in specification:
            writeOut(pyFile, '')
            writeOut(pyFile, '{}: {}'.format(tag.title(), specification[tag]))
    writeOut(pyFile, '"""')
    writeOut(pyFile, '')
    writeOut(pyFile, 'from struct import calcsize, pack, unpack_from')
    writeOut(pyFile, 'from zope.interface import directlyProvides, Interface')
    writeOut(pyFile, '')
    writeOut(pyFile, '')
    prefix = '    '

    # Create interfaces for testing and documenting
    extensionlessName = options['pyFilename'].split('.')[0]
    extensionlessName = extensionlessName[0].upper() + extensionlessName[1:]
    writeOut(pyFile, 'class I{}Length(Interface):'.format(extensionlessName))
    writeOut(pyFile, '"""', prefix)
    writeOut(pyFile, 'A binary packet length calculator', prefix)
    writeOut(pyFile, '')
    writeOut(pyFile, 'Interface for an entity that returns the length ' \
             + 'of a binary packet buffer.', prefix)
    writeOut(pyFile, '"""', prefix)
    writeOut(pyFile, 'def __call__():', prefix)
    writeOut(pyFile, '"""Returns the length of the object in bytes."""',
             2 * prefix)
    writeOut(pyFile, '')
    writeOut(pyFile, '')
    writeOut(pyFile, 'class I{}Packer(Interface):'.format(extensionlessName))
    writeOut(pyFile, '"""', prefix)
    writeOut(pyFile, 'A binary data packer', prefix)
    writeOut(pyFile, '')
    writeOut(pyFile, 'Interface for an entity that packs binary data.', prefix)
    writeOut(pyFile, '"""', prefix)
    writeOut(pyFile, 'def __call__(packet):', prefix)
    writeOut(pyFile, '"""Packs a packet dict into a string."""', 2 * prefix)
    writeOut(pyFile, '')
    writeOut(pyFile, '')
    writeOut(pyFile, 'class I{}Unpacker(Interface):'.format(extensionlessName))
    writeOut(pyFile, '"""', prefix)
    writeOut(pyFile, 'A binary data unpacker', prefix)
    writeOut(pyFile, '')
    writeOut(pyFile, 'Interface for an entity that unpacks binary data.',
             prefix)
    writeOut(pyFile, '"""', prefix)
    writeOut(pyFile, 'def __call__(buffer):', prefix)
    writeOut(pyFile, '"""Unpacks a binary string into a dict."""', 2 * prefix)
    writeOut(pyFile, '')
    writeOut(pyFile, '')

    # Parse the enumerations
    newLocals = outputEnumerations(specification['enums'].items(), options,
                                   pyFile)
    # The following places the enumerations in a scratch namespace so that
    # they may be referenced when evaluating formats. The previous version
    # used the bare Python-2 `exec` statement to mutate function locals,
    # which Python 3 does not support; the explicit-namespace exec()/eval()
    # form below behaves the same on both 2.6+ and 3.x.
    enumNamespace = {}
    for optionName, value in newLocals:
        if varNameRE.match(optionName) and exprRE.match(str(value)):
            # values are filtered through varNameRE/exprRE above, but
            # executing specification content still requires trusting
            # the spec file (exec of untrusted input is unsafe)
            exec('{} = {}'.format(optionName, value), globals(), enumNamespace)

    # Parse the structure
    for packetName, packet in specification['packets'].items():
        structDefList = []
        structAccretions = {
            'formatList': [],
            'countList': [],
            'varList': [],
            'bitFields': [],
            'titles': [],
            'descriptions': []
        }
        bitFieldCount = populateWorkLists(packet, specification, structDefList,
                                          structAccretions)

        # Create the get length function
        writeOut(pyFile, 'def get_{}_len():'.format(packetName))
        writeOut(pyFile, '"""', prefix)
        writeOut(pyFile, "Calculates the size of {}.".format(packetName),
                 prefix)
        writeOut(pyFile, '')
        writeOut(
            pyFile,
            "Calculates the total size of the {} structure".format(packetName),
            prefix)
        writeOut(pyFile, "(including any internal substructures).", prefix)
        writeOut(pyFile, '')
        writeOut(pyFile, 'Returns:', prefix)
        writeOut(pyFile, 'The size of {}.'.format(packetName), 2 * prefix)
        # The following section determines how many bytes a packet
        # consists of so we can make good doctests. To do so it
        # evaluates the expressions used for the format descriptions.
        packetLen = 0
        try:
            for structDef in structDefList:
                if structDef['type'] == 'segment':
                    assert structFmtRE.match(structDef['fmt'])
                    # evaluate the format expression with the enum names
                    # collected above in scope
                    packetLen += calcsize(
                        eval(structDef['fmt'], globals(), enumNamespace))
                elif structDef['type'] == 'substructure':
                    packetLen += packetLengths[structDef['itemType']]
            packetLengths[packetName] = packetLen
            writeOut(pyFile, '')
            writeOut(pyFile, 'Examples:', prefix)
            writeOut(pyFile, '>>> get_{}_len()'.format(packetName), prefix * 2)
            writeOut(pyFile, '{}'.format(packetLen), prefix * 2)
        except KeyError:
            # If we can't evaluate it don't bother with a doctest
            # for this one. This can happen if dependencies get
            # defined after they're used.
            pass
        writeOut(pyFile, '"""', prefix)
        # Create the function itself.
        formatStrList = [
            structDef['fmt'] for structDef in structDefList
            if structDef['type'] == 'segment'
        ]
        if not formatStrList:
            writeOut(pyFile, 'totalSize = 0', prefix)
        else:
            writeOut(
                pyFile, 'totalSize = calcsize({})'.format(
                    ') + calcsize('.join(formatStrList)), prefix)
        substructureList = [
            structDef['itemType'] for structDef in structDefList
            if structDef['type'] == 'substructure'
        ]
        for substruct in substructureList:
            writeOut(pyFile, 'totalSize += get_{}_len()'.format(substruct),
                     prefix)
        writeOut(pyFile, 'return totalSize', prefix)
        writeOut(
            pyFile, 'directlyProvides(get_{}_len, I{}Length)'.format(
                packetName, extensionlessName))
        writeOut(pyFile, '')
        writeOut(pyFile, '')

        # Create the pack function
        writeOut(pyFile, 'def pack_{}(packet):'.format(packetName))
        writeOut(pyFile, '"""', prefix)
        writeOut(pyFile, "Packs a {} packet.".format(packetName), prefix)
        if 'description' in packet:
            writeOut(pyFile, '')
            writeOutBlock(pyFile, packet['description'], prefix)
        writeOut(pyFile, '')
        writeOut(pyFile, 'Args:', prefix)
        writeOut(pyFile, 'packet (dict): A dictionary of data to be packed.',
                 2 * prefix)
        writeOut(pyFile, '')
        writeOut(pyFile, 'Returns:', prefix)
        writeOut(pyFile, 'A binary string containing the packed data.',
                 2 * prefix)
        writeOut(pyFile, '"""', prefix)
        writeOut(pyFile, 'assert isinstance(packet, dict)', prefix)
        writeOut(pyFile, 'outList = []', prefix)
        for structDef in structDefList:
            if structDef['type'] == 'segment':
                # bit fields are folded into integers from the last
                # fragment backwards (hence the reversed iteration)
                for fragNum, (bitFieldName, bitFieldNum, bitFieldSize,
                              bitFieldLabel) in enumerate(
                                  reversed(structDef['bitFields'])):
                    if fragNum == 0:
                        writeOut(
                            pyFile,
                            'bitField{} = {}'.format(bitFieldNum,
                                                     bitFieldName), prefix)
                    else:
                        writeOut(
                            pyFile,
                            'bitField{} <<= {}'.format(bitFieldNum,
                                                       bitFieldSize), prefix)
                        writeOut(
                            pyFile,
                            'bitField{} |= {}'.format(bitFieldNum,
                                                      bitFieldName), prefix)
                writeOut(
                    pyFile, 'outList.append(pack({}, {}))'.format(
                        structDef['fmt'], structDef['vars'][1:-1]), prefix)
            elif structDef['type'] == 'substructure':
                writeOut(
                    pyFile, 'outList.append(pack_{}(packet["{}"]))'.format(
                        structDef['itemType'], structDef['itemName']), prefix)
        writeOut(pyFile, 'return "".join(outList)', prefix)
        writeOut(
            pyFile, 'directlyProvides(pack_{}, I{}Packer)'.format(
                packetName, extensionlessName))
        writeOut(pyFile, '')
        writeOut(pyFile, '')

        # Create the unpack function
        writeOut(pyFile, 'def unpack_{}(rawData):'.format(packetName))
        writeOut(pyFile, '"""', prefix)
        if 'title' in packet:
            writeOut(pyFile, packet['title'], prefix)
        else:
            writeOut(pyFile, packetName, prefix)
        if 'description' in packet:
            writeOut(pyFile, '')
            writeOutBlock(pyFile, packet['description'], prefix)
        writeOut(pyFile, '')
        writeOut(pyFile, 'Args:', prefix)
        writeOut(pyFile, 'rawData (str): The raw binary data to be unpacked.',
                 2 * prefix)
        writeOut(pyFile, '')
        writeOut(pyFile, 'Returns:', prefix)
        writeOut(pyFile, 'A dictionary of the unpacked data.', 2 * prefix)
        # Write out the next bit to a temporary buffer.
        outBufStr = StringIO()
        writeOut(outBufStr, '"""', prefix)
        writeOut(outBufStr, 'assert isinstance(rawData, str)', prefix)
        writeOut(outBufStr, 'packet = {}', prefix)
        writeOut(outBufStr, 'position = 0', prefix)
        for structDef in structDefList:
            line = []
            if structDef['type'] == 'segment':
                line.append('segmentFmt = {}{}'.format(structDef['fmt'],
                                                       linesep))
                line.append('{}segmentLen = calcsize(segmentFmt){}'.format(
                    prefix, linesep))
                line.append(
                    '{}{} = unpack_from(segmentFmt, rawData, position){}'.
                    format(prefix, structDef['vars'], linesep))
                line.append('{}position += segmentLen{}'.format(
                    prefix, linesep))
                for fragNum, (bitFieldName, bitFieldNum, bitFieldSize,
                              bitFieldLabel) in enumerate(
                                  structDef['bitFields']):
                    bitFieldMask = hex(int(pow(2, bitFieldSize)) - 1)
                    if isFloatType(bitFieldLabel):
                        bitFieldType = 'float'
                    elif isBooleanType(bitFieldLabel):
                        bitFieldType = 'bool'
                    elif isStringType(bitFieldLabel):
                        bitFieldType = 'str'
                    else:
                        bitFieldType = 'int'
                    line.append("{}{} = {}(bitField{} & {}){}".format(
                        prefix, bitFieldName, bitFieldType, bitFieldNum,
                        bitFieldMask, linesep))
                    if fragNum < len(structDef['bitFields']) - 1:
                        line.append("{}bitField{} >>= {}{}".format(
                            prefix, bitFieldNum, bitFieldSize, linesep))
                if line[-1].endswith(linesep):
                    line[-1] = line[-1][:-len(linesep)]
            elif structDef['type'] == 'substructure':
                if structDef['description']:
                    writeOut(outBufStr, '')
                    writeOutBlock(outBufStr, structDef['description'],
                                  '    # ')
                line.append(
                    "packet['{}'] = unpack_{}(rawData[position:]){}".format(
                        structDef['itemName'], structDef['itemType'], linesep))
                line.append("{}position += get_{}_len()".format(
                    prefix, structDef['itemType']))
                if structDef['title']:
                    line.append(' # {}'.format(structDef['title']))
            if line:
                writeOut(outBufStr, ''.join(line), prefix)
        writeOut(outBufStr, 'return packet', prefix)
        writeOut(
            outBufStr, 'directlyProvides(unpack_{}, I{}Unpacker)'.format(
                packetName, extensionlessName))
        # Write the temporary buffer to the output file.
        writeOut(pyFile, outBufStr.getvalue())
        outBufStr.close()
        writeOut(pyFile, '')

        # Create the validate function
        writeOut(pyFile, 'def validate_{}(rawData):'.format(packetName))
        writeOut(pyFile, '"""', prefix)
        writeOut(pyFile, "Reads and validates a {} packet.".format(packetName),
                 prefix)
        writeOut(pyFile, '')
        writeOut(
            pyFile,
            "Reads a {} structure from raw binary data".format(packetName),
            prefix)
        writeOut(pyFile, "and validates it.", prefix)
        writeOut(pyFile, '')
        writeOut(pyFile, 'Args:', prefix)
        writeOut(pyFile, 'rawData (str): The raw binary data to be unpacked.',
                 2 * prefix)
        writeOut(pyFile, '')
        writeOut(pyFile, 'Returns:', prefix)
        writeOut(pyFile,
                 'A structure representing the {} packet.'.format(packetName),
                 2 * prefix)
        writeOut(pyFile, '"""', prefix)
        writeOut(pyFile, 'assert isinstance(rawData, str)', prefix)
        # NOTE(review): the generated validator calls get_<packet>(), but
        # this generator only emits get_<packet>_len/pack_/unpack_ --
        # confirm whether unpack_<packet> was the intended callee.
        writeOut(pyFile, 'packet = get_{}(rawData)'.format(packetName), prefix)
        writeOut(pyFile, 'return packet', prefix)
        writeOut(pyFile, '')
        writeOut(pyFile, '')

    writeOut(pyFile, 'if __name__ == "__main__":')
    writeOut(pyFile, 'from zope.interface.verify import verifyObject', prefix)
    writeOut(pyFile, 'import doctest', prefix)
    writeOut(pyFile, 'doctest.testmod()', prefix)
 def _build_app_script(self):
     """Assemble the app's JavaScript bundle and write it out minified."""
     script_name = self.app_file('js')
     output = StringIO()
     # collect all application script fragments into the in-memory buffer
     self._concatenate_application_script(output)
     # write_file/minify_js are helpers provided elsewhere in the project
     write_file(join(self.output_dir, script_name), minify_js(output.getvalue()))
     output.close()
예제 #42
0
 def __init__(self, value, e):
     """Store *value* together with the current exception traceback text.

     NOTE(review): *e* is unused; traceback.print_exc() formats the
     exception currently being handled, so this constructor presumably
     runs inside an except block -- confirm at the call sites.
     """
     s = StringIO()
     traceback.print_exc(file=s)
     self.value = (value, s.getvalue())
     s.close()
예제 #43
0
파일: alerts.py 프로젝트: iotest3/new
    def send_digests(self, context, period='daily'):
        """Send digest emails of pending alerts to every profile.

        Args:
            context: site root used to look up settings and profiles.
            period: 'daily', 'weekly' or 'biweekly'; selects which alert
                preferences are flushed in this run (longer periods also
                include the shorter ones).
        """
        PERIODS = {
            'daily': [IProfile.ALERT_DAILY_DIGEST],
            'weekly': [
                IProfile.ALERT_DAILY_DIGEST,
                IProfile.ALERT_WEEKLY_DIGEST,
            ],
            'biweekly': [
                IProfile.ALERT_DAILY_DIGEST,
                IProfile.ALERT_WEEKLY_DIGEST,
                IProfile.ALERT_BIWEEKLY_DIGEST,
            ],
        }
        periods = PERIODS[period]
        mailer = getUtility(IMailDelivery)

        system_name = get_setting(context, "system_name", "KARL")
        system_email_domain = get_setting(context, "system_email_domain")
        sent_from = "%s@%s" % (system_name, system_email_domain)
        from_addr = "%s <%s>" % (system_name, sent_from)
        subject = "[%s] Your alerts digest" % system_name

        template = get_renderer("email_digest.pt").implementation()
        for profile in find_profiles(context).values():
            if not list(profile._pending_alerts):
                continue

            # Perform each in its own transaction, so a problem with one
            # user's email doesn't block all others
            transaction.manager.begin()
            alerts = profile._pending_alerts.consume()
            try:
                pending = []
                skipped = []
                for alert in alerts:
                    community = alert.get('community')
                    if community is not None:
                        pref = profile.get_alerts_preference(community)
                        if pref in periods:
                            pending.append(alert)
                        else:
                            skipped.append(alert)
                    else:  # XXX belt-and-suspenders:  send it now
                        pending.append(alert)

                if len(pending) > 0:

                    attachments = []
                    for alert in pending:
                        attachments += alert['attachments']

                    # multipart only when attachments are present
                    msg = MIMEMultipart() if attachments else Message()
                    msg["From"] = from_addr
                    msg["To"] = "%s <%s>" % (profile.title, profile.email)
                    msg["Subject"] = subject

                    body_text = template.render(
                        system_name=system_name,
                        alerts=pending,
                    )

                    if isinstance(body_text, unicode):
                        body_text = body_text.encode("UTF-8")

                    if attachments:
                        body = MIMEText(body_text, 'html', 'utf-8')
                        msg.attach(body)
                    else:
                        msg.set_payload(body_text, "UTF-8")
                        msg.set_type("text/html")

                    for attachment in attachments:
                        msg.attach(attachment)

                    mailer.send([
                        profile.email,
                    ], msg)

                # alerts for other digest periods go back in the queue
                for alert in skipped:
                    profile._pending_alerts.append(alert)

                transaction.manager.commit()

            # `except Exception:` replaces the legacy Python-2-only
            # `except Exception, e:` form; the bound name was never used
            # (the traceback below is captured via print_exc instead).
            except Exception:
                # Log error and continue
                log.error("Error sending digest to %s <%s>" %
                          (profile.title, profile.email))

                b = StringIO()
                traceback.print_exc(file=b)
                log.error(b.getvalue())
                b.close()

                transaction.manager.abort()
예제 #44
0
    def Export(self, context, request):
        """Export the worksheet layout as a CSV for the instrument interface.

        Builds one row per parent AR (keyed by its "cup"/slot position)
        and streams the resulting semicolon-delimited CSV to the browser.
        """
        tray = 1
        now = DateTime().strftime('%Y%m%d-%H%M')
        uc = api.get_tool('uid_catalog')
        instrument = context.getInstrument()
        norm = getUtility(IIDNormalizer).normalize
        filename = '{}-{}.csv'.format(
            context.getId(), norm(instrument.getDataInterface()))
        listname = '{}_{}_{}'.format(
            context.getId(), norm(instrument.Title()), now)
        options = {
            'dilute_factor': 1,
            'method': 'F SO2 & T SO2'
        }
        # instrument-level options override the defaults above
        for k, v in instrument.getDataInterfaceOptions():
            options[k] = v

        # for looking up "cup" number (= slot) of ARs
        parent_to_slot = {}
        layout = context.getLayout()
        for entry in layout:
            a_uid = entry['analysis_uid']
            p_uid = uc(UID=a_uid)[0].getObject().aq_parent.UID()
            entry['parent_uid'] = p_uid
            # first occurrence wins: record each parent's slot only once
            if p_uid not in parent_to_slot:
                parent_to_slot[p_uid] = int(entry['position'])

        # write rows, one per PARENT
        header = [listname, options['method']]
        rows = [header]
        tmprows = []
        ARs_exported = []
        for entry in layout:
            # create batch header row
            c_uid = entry['container_uid']
            p_uid = entry['parent_uid']
            if p_uid in ARs_exported:
                continue
            cup = parent_to_slot[p_uid]
            tmprows.append([tray,
                            cup,
                            p_uid,
                            c_uid,
                            options['dilute_factor'],
                            ""])
            ARs_exported.append(p_uid)
        # sort batches by cup/slot; key= replaces the Python-2-only
        # cmp-based sort (removed in Python 3) and avoids a Python-level
        # comparator call per comparison
        tmprows.sort(key=lambda row: row[1])
        rows += tmprows

        ramdisk = StringIO()
        writer = csv.writer(ramdisk, delimiter=';')
        assert(writer)
        writer.writerows(rows)
        result = ramdisk.getvalue()
        ramdisk.close()

        # stream file to browser
        setheader = request.RESPONSE.setHeader
        setheader('Content-Length', len(result))
        setheader('Content-Type', 'text/comma-separated-values')
        setheader('Content-Disposition', 'inline; filename=%s' % filename)
        request.RESPONSE.write(result)
예제 #45
0
파일: w3c.py 프로젝트: xiaxichen/pyv8
class HTMLDocument(Document):
    # XPath-backed accessors for the standard DOM document attributes.
    title = xpath_property("/html/head/title/text()")
    body = xpath_property("/html/body[1]")

    # Read-only DOM collections.
    images = xpath_property("//img", readonly=True)
    applets = xpath_property("//applet", readonly=True)
    forms = xpath_property("//form", readonly=True)
    links = xpath_property("//a[@href]", readonly=True)
    anchors = xpath_property("//a[@name]", readonly=True)

    def __init__(self,
                 doc,
                 win=None,
                 referer=None,
                 lastModified=None,
                 cookie=''):
        """
        Wrap a parsed HTML document.

        doc          -- the underlying parsed tree passed to Document.
        win          -- owning window object, if any.
        referer      -- referring URL, exposed via the referrer property.
        lastModified -- value exposed via the lastModified property.
        cookie       -- initial cookie string.
        """
        Document.__init__(self, doc)

        self._win = win
        self._referer = referer
        self._lastModified = lastModified
        self._cookie = cookie

        # Write buffer used between open() and close(); None when closed.
        self._html = None

        # Tag that write() inserts after when no open buffer exists.
        self.current = None

    @property
    def window(self):
        return self._win

    @window.setter
    def window(self, win):
        self._win = win

    @property
    def referrer(self):
        return self._referer

    @property
    def lastModified(self):
        # BUG FIX: the original did `raise self._lastModified`, which made
        # every read of this property fail (raising the stored value, or a
        # TypeError when it was None). Return the value instead.
        return self._lastModified

    @property
    def cookie(self):
        return self._cookie

    @property
    def domain(self):
        # Hostname of the owning window's URL, or '' when detached.
        return urlparse(self._win.url).hostname if self._win else ''

    @property
    def URL(self):
        return self._win.url if self._win else ''

    def open(self, mimetype='text/html', replace=False):
        # Start buffering document writes; mimetype/replace are accepted for
        # DOM compatibility but are not used by this implementation.
        self._html = StringIO()

        return self

    def close(self):
        # Re-parse everything written since open() as the new document tree.
        html = self._html.getvalue()
        self._html.close()
        self._html = None

        self.doc = BeautifulSoup.BeautifulSoup(html)

    def write(self, html):
        if self._html:
            # Document is open: just append to the buffer.
            self._html.write(html)
        else:
            # Document is closed: parse the fragment and insert its tags
            # immediately after the current insertion point.
            tag = self.current
            parent = tag.parent
            pos = parent.contents.index(tag) + 1

            for tag in BeautifulSoup.BeautifulSoup(html).contents:
                parent.insert(pos, tag)

                pos += 1

    def writeln(self, text):
        self.write(text + "\n")

    def getElementById(self, elementId):
        tag = self.doc.find(id=elementId)
        return DOMImplementation.createHTMLElement(self, tag) if tag else None

    def getElementsByName(self, elementName):
        tags = self.doc.findAll(attrs={'name': elementName})

        return HTMLCollection(self.doc, tags)
예제 #46
0
class Qdb(Bdb, object):
    """
    The Quantopian Remote Debugger.
    """
    # Singleton handle; reset to None in disable() so that a fresh debugger
    # may be constructed afterwards.
    _instance = None

    def __new__(cls, *args, **kwargs):
        """
        Qdb objects are singletons that persist until their disable method is
        called.
        """
        if not cls._instance:
            cls._instance = super(Qdb, cls).__new__(cls, *args, **kwargs)
        return cls._instance

    def __init__(self,
                 host='localhost',
                 port=8001,
                 auth_msg='',
                 default_file=None,
                 default_namespace=None,
                 eval_fn=None,
                 exception_serializer=None,
                 skip_fn=None,
                 pause_signal=None,
                 redirect_output=True,
                 retry_attepts=10,
                 uuid=None,
                 cmd_manager=None,
                 green=False,
                 repr_fn=None,
                 log_file=None,
                 execution_timeout=None):
        """
        Host and port define the address to connect to.
        The auth_msg is a message that will be sent with the start event to the
        server. This can be used to do server/tracer authentication.
        The default_file is a file to use if the file field is omitted from
        payloads.
        eval_fn is the function to eval code where the user may provide it,
        for example in a conditional breakpoint, or in the repl.
        skip_fn is similar to the skip list feature of Bdb, except that
        it should be a function that takes a filename and returns True iff
        the debugger should skip this file. These files will be suppressed from
        stack traces.
        The pause_signal is signal to raise in this program to trigger a pause
        command. If this is none, this will default to SIGUSR2.
        retry_attepts (sic; name kept for backward compatibility) is the
        number of times to attempt to connect to the server before raising a
        QdbFailedToConnect error.
        The repr_fn is a function to use to convert objects to strings to send
        them back to the server. By default, this wraps repr by catching
        exceptions and reporting them to the user.
        The uuid is the identifier on the server for this session. If none is
        provided, it will generate a uuid4.
        cmd_manager should be a callable that takes a Qdb instance and manages
        commands by implementing a next_command method. If none, a new, default
        manager will be created that reads commands from the server at
        (host, port).
        If green is True, this will use gevent safe timeouts, otherwise this
        will use signal based timeouts.
        repr_fn is the repr function to use when displaying results. If None,
        use the builtin repr.
        execution_timeout is the amount of time user code has to execute before
        being cut short. This is applied to the repl, watchlist and conditional
        breakpoints. If None, no timeout is applied.
        """
        super(Qdb, self).__init__()
        self.address = host, port
        self.set_default_file(default_file)
        self.default_namespace = default_namespace or {}
        self.exception_serializer = exception_serializer or \
            default_exception_serializer
        self.eval_fn = eval_fn or default_eval_fn
        self.green = green
        self._file_cache = {}
        self.redirect_output = redirect_output
        self.retry_attepts = retry_attepts
        self.repr_fn = repr_fn
        self.skip_fn = skip_fn or (lambda _: False)
        self.pause_signal = pause_signal if pause_signal else signal.SIGUSR2
        self.uuid = str(uuid or uuid4())
        self.watchlist = {}
        self.execution_timeout = execution_timeout
        # We need to be able to send stdout back to the user debugging the
        # program. We hold a handle to this in case the program resets stdout.
        if self.redirect_output:
            self._old_stdout = sys.stdout
            self._old_stderr = sys.stderr
            self.stdout = StringIO()
            self.stderr = StringIO()
            sys.stdout = self.stdout
            sys.stderr = self.stderr
        self.forget()
        self.log_handler = None
        if log_file:
            self.log_handler = FileHandler(log_file)
            self.log_handler.push_application()
        if not cmd_manager:
            cmd_manager = RemoteCommandManager
        self.cmd_manager = cmd_manager(self)
        self.cmd_manager.start(auth_msg)

    def clear_output_buffers(self):
        """
        Clears the output buffers.
        """
        self.stdout.close()
        self.stderr.close()
        self.stdout = StringIO()
        self.stderr = StringIO()
        sys.stdout = self.stdout
        sys.stderr = self.stderr

    def restore_output_streams(self):
        """
        Restores the original output streams.
        """
        if self.redirect_output:
            sys.stdout = self._old_stdout
            sys.stderr = self._old_stderr

    def _new_execution_timeout(self, src):
        """
        Return a new execution timeout context manager.
        If no execution timeout is in place, returns ExitStack().
        """
        # We use green=False because this could be cpu bound. This will
        # still throw to the proper greenlet if this is gevented.
        return (Timeout(self.execution_timeout,
                        QdbExecutionTimeout(src, self.execution_timeout),
                        green=False)
                if self.execution_timeout else ExitStack())

    def set_default_file(self, filename):
        """
        Safely sets the new default file.
        """
        self.default_file = self.canonic(filename) if filename else None

    def get_line(self, filename, line):
        """
        Checks for any user cached files before deferring to the linecache.
        """
        # The line - 1 is so that querying line 1 gives us the first line in
        # the file.
        try:
            return self._get_file_lines(filename)[line - 1]
        except IndexError:
            # NOTE(review): because this swallows IndexError and returns a
            # message string, callers cannot distinguish "no such line" from
            # real source (see set_break) — confirm this is intended.
            return 'No source available for this line.'

    def get_file(self, filename):
        """
        Retrieves a file out of cache or opens and caches it.
        """
        return '\n'.join(self._get_file_lines(filename))

    def _get_file_lines(self, filename):
        """
        Retrieves the file from the file cache as a list of lines.
        If the file does not exist in the cache, it is cached from
        disk.
        """
        canonic_name = self.canonic(filename)
        try:
            return self._file_cache[canonic_name]
        except KeyError:
            if not self.cache_file(canonic_name):
                return []
            return self._file_cache.get(canonic_name)

    def cache_file(self, filename, contents=None):
        """
        Caches filename from disk into memory.
        This overrides whatever was cached for filename previously.
        If contents is provided, it allows the user to cache a filename to a
        string.
        Returns True if the file caching succeeded, otherwise returns false.
        """
        canonic_name = self.canonic(filename)
        if contents:
            self._file_cache[canonic_name] = contents.splitlines()
            return True
        try:
            with open(canonic_name, 'r') as f:
                self._file_cache[canonic_name] = map(
                    lambda l: l[:-1] if l.endswith('\n') else l, f.readlines())
                return True
        except IOError:
            # The caching operation failed.
            return False

    def set_break(self,
                  filename,
                  lineno,
                  temporary=False,
                  cond=None,
                  funcname=None,
                  **kwargs):
        """
        Sets a breakpoint. This is overridden to account for the filecache
        and for unreachable lines.
        **kwargs are ignored. This is to work with payloads that pass extra
        fields to the set_break payload.
        """
        filename = self.canonic(filename) if filename else self.default_file
        try:
            self.get_line(filename, lineno)
        except IndexError:
            # NOTE(review): get_line catches IndexError internally and
            # returns a placeholder string, so this branch appears
            # unreachable and QdbUnreachableBreakpoint is never raised —
            # verify against the original project before relying on it.
            raise QdbUnreachableBreakpoint({
                'file': filename,
                'line': lineno,
                'temp': temporary,
                'cond': cond,
                'func': funcname,
            })

        blist = self.breaks.setdefault(filename, [])
        if lineno not in blist:
            blist.append(lineno)
        Breakpoint(filename, lineno, temporary, cond, funcname)

    def clear_break(self, filename, lineno, *args, **kwargs):
        """
        Wrapper to make the breakpoint json standardized for setting
        and removing of breakpoints.
        This means that the same json data that was used to set a break point
        may be fed into this function with the extra values ignored.
        """
        super(Qdb, self).clear_break(filename, lineno)

    def canonic(self, filename):
        # Map compiled file names back to their source: 'foo.pyc' -> 'foo.py'.
        canonic_filename = super(Qdb, self).canonic(filename)
        if canonic_filename.endswith('pyc'):
            return canonic_filename[:-1]
        return canonic_filename

    def reset(self):
        # Clear Bdb's stop state and our own frame bookkeeping.
        self.botframe = None
        self._set_stopinfo(None, None)
        self.forget()

    def forget(self):
        # Drop all references to the current stack.
        self.lineno = None
        self.stack = []
        self.curindex = 0
        self.curframe = None

    def setup_stack(self, stackframe, traceback):
        """
        Sets up the state of the debugger object for this frame.
        """
        self.forget()
        self.stack, self.curindex = self.get_stack(stackframe, traceback)
        self.curframe = self.stack[self.curindex][0]
        self.curframe_locals = self.curframe.f_locals
        self.update_watchlist()

    def extend_watchlist(self, *args):
        """
        Adds every arg to the watchlist and updates.
        """
        for expr in args:
            self.watchlist[expr] = (False, '')

        self.update_watchlist()

    def update_watchlist(self):
        """
        Updates the watchlist by evaluating all the watched expressions in
        our current frame.
        """
        id_ = lambda n: n  # Why is this NOT a builtin?
        for expr in self.watchlist:
            try:
                with self._new_execution_timeout(expr), \
                        self.inject_default_namespace() as stackframe:
                    self.watchlist[expr] = (None, (self.repr_fn
                                                   or id_)(self.eval_fn(
                                                       expr, stackframe)))
            except Exception as e:
                # Store the exception type name and serialized message so the
                # client can display the failure next to the expression.
                self.watchlist[expr] = (type(e).__name__,
                                        self.exception_serializer(e))

    def effective(self, file, line, stackframe):
        """
        Finds the effective breakpoint for this line; called only
        when we know that there is a breakpoint here.

        returns the breakpoint paired with a flag denoting if we should
        remove this breakpoint or not.
        """
        for breakpoint in Breakpoint.bplist[file, line]:
            if breakpoint.enabled == 0:
                continue
            if not checkfuncname(breakpoint, stackframe):
                continue
            # Count every hit when breakpoint is enabled
            breakpoint.hits = breakpoint.hits + 1
            if not breakpoint.cond:
                # If unconditional, and ignoring go on to next, else break
                if breakpoint.ignore > 0:
                    breakpoint.ignore = breakpoint.ignore - 1
                    continue
                else:
                    return breakpoint, True
            else:
                # Conditional breakpoint
                # Ignore count applies only to those bpt hits where the
                # condition evaluates to true.
                try:
                    with self._new_execution_timeout(breakpoint.cond), \
                            self.inject_default_namespace(stackframe) as frame:
                        val = self.eval_fn(breakpoint.cond, frame, 'eval')
                except Exception as e:
                    # Send back a message to let the user know there was an
                    # issue with their breakpoint.
                    self.cmd_manager.send_error(
                        'condition', {
                            'cond': breakpoint.cond,
                            'line': line,
                            'exc': type(e).__name__,
                            'output': self.exception_serializer(e),
                        })
                    # Return this breakpoint to be safe. The user will be
                    # stopped here so that they can fix the breakpoint.
                    return breakpoint, False

                if val:
                    if breakpoint.ignore > 0:
                        breakpoint.ignore = breakpoint.ignore - 1
                    else:
                        return breakpoint, True
        return None, False

    def break_here(self, stackframe):
        """
        Checks if we should break execution in this stackframe.
        This function handles the cleanup and ignore counts for breakpoints.
        Returns True iff we should stop in the stackframe, False otherwise.
        """
        filename = self.canonic(stackframe.f_code.co_filename)
        if filename not in self.breaks:
            return False
        lineno = stackframe.f_lineno
        if lineno not in self.breaks[filename]:
            # The line itself has no breakpoint, but maybe the line is the
            # first line of a function with breakpoint set by function name.
            lineno = stackframe.f_code.co_firstlineno
            if lineno not in self.breaks[filename]:
                return False

        # flag says ok to delete temporary breakpoints.
        breakpoint, flag = self.effective(filename, lineno, stackframe)
        if breakpoint:
            self.currentbp = breakpoint.number
            if flag and breakpoint.temporary:
                self.do_clear(breakpoint.number)
            return True
        else:
            return False

    def trace_dispatch(self, stackframe, event, arg):
        """
        Trace function that does some preliminary checks and then defers to
        the event handler for each type of event.
        """
        if self.quitting:
            # We were told to quit by the user, bubble this up to their code.
            return

        if self.skip_fn(self.canonic(stackframe.f_code.co_filename)):
            # We want to skip this, don't stop but keep tracing.
            return self.trace_dispatch

        try:
            return super(Qdb, self).trace_dispatch(stackframe, event, arg)
        except BdbQuit:
            raise QdbQuit()  # Rewrap as a QdbError object.

    def user_call(self, stackframe, arg):
        # Only stop on a call event when a breakpoint is set here.
        if self.break_here(stackframe):
            self.user_line(stackframe)

    def user_line(self, stackframe):
        # Stopped on a line: sync state with the client and wait for commands.
        self.setup_stack(stackframe, None)
        self.cmd_manager.send_watchlist()
        self.cmd_manager.send_output()
        self.cmd_manager.send_stack()
        self.cmd_manager.next_command()

    def user_return(self, stackframe, return_value):
        # Expose the return value to the repl via __return__.
        stackframe.f_locals['__return__'] = return_value
        self.setup_stack(stackframe, None)
        self.cmd_manager.send_watchlist()
        self.cmd_manager.send_output()
        self.cmd_manager.send_stack()
        msg = fmt_msg('return', str(return_value), serial=pickle.dumps)
        self.cmd_manager.next_command(msg)

    def user_exception(self, stackframe, exc_info):
        # Expose the exception to the repl via __exception__ and report it.
        exc_type, exc_value, exc_traceback = exc_info
        stackframe.f_locals['__exception__'] = exc_type, exc_value
        self.setup_stack(stackframe, exc_traceback)
        self.cmd_manager.send_watchlist()
        self.cmd_manager.send_output()
        self.cmd_manager.send_stack()
        msg = fmt_msg(
            'exception',
            {
                'type': str(exc_type),
                'value': str(exc_value),
                'traceback': traceback.format_tb(exc_traceback)
            },
            serial=pickle.dumps,
        )
        self.cmd_manager.next_command(msg)

    def do_clear(self, bpnum):
        """
        Handles deletion of temporary breakpoints.
        """
        if not (0 <= bpnum < len(Breakpoint.bpbynumber)):
            return
        self.clear_bpbynumber(bpnum)

    def set_quit(self):
        """
        Sets the quitting state and restores the program state.
        """
        self.quitting = True

    def disable(self, mode='soft'):
        """
        Stops tracing.
        """
        try:
            if mode == 'soft':
                self.clear_all_breaks()
                self.set_continue()
                # Remove this instance so that new ones may be created.
                self.__class__._instance = None
            elif mode == 'hard':
                sys.exit(1)
            else:
                raise ValueError("mode must be 'hard' or 'soft'")
        finally:
            # Always undo the global state changes, even when exiting hard.
            self.restore_output_streams()
            if self.log_handler:
                self.log_handler.pop_application()
            self.cmd_manager.stop()

    def __enter__(self):
        self.set_trace(sys._getframe().f_back, stop=False)
        return self

    def __exit__(self, type, value, traceback):
        # Only tear down cleanly on success or an explicit QdbQuit; other
        # exceptions propagate with tracing still active.
        if isinstance(value, QdbQuit) or value is None:
            self.disable('soft')

    def set_trace(self, stackframe=None, stop=True):
        """
        Starts debugging in stackframe or in the callers frame.
        If stop is True, begin stepping from here, otherwise, wait for
        the first breakpoint or exception.
        """
        # We need to look back 1 frame to get our caller.
        stackframe = stackframe or sys._getframe().f_back
        self.reset()
        while stackframe:
            stackframe.f_trace = self.trace_dispatch
            self.botframe = stackframe
            stackframe = stackframe.f_back
        if stop:
            self.set_step()
        else:
            self.set_continue()
        sys.settrace(self.trace_dispatch)

    @contextmanager
    def inject_default_namespace(self, stackframe=None):
        """
        Adds the default namespace to the frame, or if no frame is provided,
        self.curframe is used.
        """
        stackframe = stackframe or self.curframe
        to_remove = set()
        for k, v in self.default_namespace.iteritems():
            if k not in stackframe.f_globals:
                # Only add the default things if the name is unbound.
                stackframe.f_globals[k] = v
                to_remove.add(k)

        try:
            yield stackframe
        finally:
            for k in to_remove:
                try:
                    del stackframe.f_globals[k]
                except KeyError:
                    # BUG FIX: deleting a missing dict key raises KeyError,
                    # not IndexError as the original caught, so the guard
                    # below never fired and a user `del` inside the managed
                    # body crashed the cleanup.
                    # The body of this manager might have del'd this.
                    pass

            # Prevent exceptions from generating ref cycles.
            del stackframe
예제 #47
0
파일: persist.py 프로젝트: jbalint/spark
                     '# Modify this file at your own risk (and only while SPARK\n'+\
                     '# is not currently running).\n'+\
                     '###########################################################\n\n')
        # write object info first
        output.write(objOutput.getvalue())
        # then write predicate info that relies on objects existing
        output.write(predOutput.getvalue())
    finally:
        output.close()

    #remove oldest version and add new version
    # 5
    _roll_versions(agent, filename)
    # 6
    errValue = errOutput.getvalue()
    errOutput.close()
    if errValue:
        errFile = open(filename + ERRORS_SUFFIX, 'w')
        errFile.write("Errors for agent '%s'\n"%agent.name+\
                      "Date '%s'\n\n"%dateTime)
        errFile.write(errValue)
        errFile.close()

    global _lastPersistKbTime
    _lastPersistKbTime = time()
    elapsed = _lastPersistKbTime - startTime
    debug("persisting kb complete (%f secs).", elapsed)
    #get_sdl().logger.info("persistKb: %f secs"%elapsed)
    #print "PERSISTED KB TO", name
    #all done
예제 #48
0
def run(
        params=None,  # params for running from command line
        map_data=None,  # map_data, as_double()
        pdb_inp=None,
        pdb_hierarchy=None,
        crystal_symmetry=None,
        resolution=None,
        scattering_table='n_gaussian',
        smoothing_window=5,
        crossover_atom='CA',
        minimum_matching_atoms=3,
        minimum_length=2,
        dist_max=1.0,
        minimum_improvement=0.01,
        max_regions_to_test=10,
        max_ends_per_region=5,
        maximum_fraction=0.5,
        max_keep=10,
        map_coeffs_file=None,
        map_coeffs_labels=None,
        pdb_in_file=None,
        pdb_out=None,
        verbose=None,
        out=sys.stdout):

    if out is None: out = sys.stdout  # explode and refine calls it this way

    # get info from params if present
    if params:
        verbose = params.control.verbose
        map_coeffs_file = params.input_files.map_coeffs_file
        map_coeffs_labels = params.input_files.map_coeffs_labels
        pdb_in_file = params.input_files.pdb_in_file
        resolution = params.crystal_info.resolution
        scattering_table = params.crystal_info.scattering_table
        smoothing_window = params.crossover.smoothing_window
        crossover_atom = params.crossover.crossover_atom
        minimum_matching_atoms = params.crossover.minimum_matching_atoms
        minimum_length = params.crossover.minimum_length
        dist_max = params.crossover.dist_max
        minimum_improvement = params.crossover.minimum_improvement
        max_regions_to_test = params.crossover.max_regions_to_test
        max_ends_per_region = params.crossover.max_ends_per_region
        maximum_fraction = params.crossover.maximum_fraction
        max_keep = params.crossover.max_keep
        pdb_out = params.output_files.pdb_out

    # Consistency checks
    if (pdb_hierarchy is not None):
        assert pdb_in_file is None
        assert pdb_inp is None
        assert crystal_symmetry is not None
        # XXX more checks here!

    # Get map_data if not present
    if not map_data:
        if not map_coeffs_file or not os.path.isfile(map_coeffs_file):
            raise Sorry("Cannot find the map_coeffs_file '%s'" %
                        (str(map_coeffs_file)))
        from mmtbx.building.minimize_chain import get_map_coeffs
        map_coeffs = get_map_coeffs(map_coeffs_file,
                                    map_coeffs_labels=map_coeffs_labels)

        fft_map = map_coeffs.fft_map(resolution_factor=0.25)
        fft_map.apply_sigma_scaling()
        map_data = fft_map.real_map_unpadded()
        map_data = map_data.as_double()
        if map_coeffs and not crystal_symmetry:
            crystal_symmetry = map_coeffs.crystal_symmetry()
        if map_coeffs and not resolution:
            resolution = map_coeffs.d_min()

    # Get the starting model
    if (pdb_hierarchy is None):
        if pdb_inp is None:
            if not pdb_in_file or not os.path.isfile(pdb_in_file):
                raise Sorry("Cannot read input PDB file '%s'" %
                            (str(pdb_in_file)))
            else:
                print >> out, "Taking models from %s" % (pdb_in_file)
                pdb_string = open(pdb_in_file).read()
            pdb_inp = iotbx.pdb.input(source_info=None, lines=pdb_string)
            if pdb_inp is None:
                raise Sorry("Need a model or models")
        if not crystal_symmetry:
            crystal_symmetry = pdb_inp.crystal_symmetry()
        assert crystal_symmetry is not None
        hierarchy = pdb_inp.construct_hierarchy()
    else:
        hierarchy = pdb_hierarchy  # XXX FIXME
    n_models = 0
    for model in hierarchy.models():
        n_models += 1

    if n_models == 1:  # nothing to do
        return hierarchy

    #xrs = pdb_inp.xray_structure_simple(crystal_symmetry=crystal_symmetry)
    xrs = hierarchy.extract_xray_structure(crystal_symmetry=crystal_symmetry)
    xrs.scattering_type_registry(table=scattering_table)

    if not resolution:
        from cctbx import maptbx
        resolution = maptbx.resolution_from_map_and_model(map_data=map_data,
                                                          xray_structure=xrs)

    print >> out, "\nResolution limit: %7.2f" % (resolution)
    print >> out, "\nSummary of input models"
    xrs.show_summary(f=out, prefix="  ")

    print >> out, "\nReady with %d models and map" % (n_models)
    # Get CC by residue for each model and map

    chain_id_and_resseq_list = []  # Instead set up chain_id and resseq (range)
    from mmtbx.secondary_structure.find_ss_from_ca import \
        split_model,get_first_resno, get_last_resno,get_chain_id
    model_list = split_model(hierarchy=hierarchy, only_first_model=True)
    for m in model_list:
        h = m.hierarchy
        first_resno = get_first_resno(h)
        last_resno = get_last_resno(h)
        chain_id = get_chain_id(h)
        residue_range = [first_resno, last_resno]
        chain_id_and_resseq = [chain_id, residue_range]
        if not chain_id_and_resseq in chain_id_and_resseq_list:
            chain_id_and_resseq_list.append(chain_id_and_resseq)

    # Run through chains separately
    # NOTE: All models of each chain must match exactly

    # Save composite model, chain by chain
    from cStringIO import StringIO
    composite_model_stream = StringIO()

    for chain_id_and_resseq in chain_id_and_resseq_list:
        from cStringIO import StringIO
        f = StringIO()
        chain_id, [start_resno, end_resno] = chain_id_and_resseq
        atom_selection = get_atom_selection(chain_id=chain_id,
                                            start_resno=start_resno,
                                            end_resno=end_resno)
        asc = hierarchy.atom_selection_cache()
        sel = asc.selection(string=atom_selection)
        sel_hierarchy = hierarchy.select(sel)
        pdb_inp = sel_hierarchy.as_pdb_input(crystal_symmetry=crystal_symmetry)
        ph = pdb_inp.construct_hierarchy()

        print >> out, "\nWorking on chain_id='%s' resseq %d:%d\n" % (
            chain_id_and_resseq[0], chain_id_and_resseq[1][0],
            chain_id_and_resseq[1][1])

        # get CC values for all residues
        cc_dict = get_cc_dict(hierarchy=ph,
                              map_data=map_data,
                              d_min=resolution,
                              crystal_symmetry=crystal_symmetry,
                              table=scattering_table,
                              out=out)

        # smooth CC values with window of smoothing_window
        smoothed_cc_dict = smooth_cc_values(cc_dict=cc_dict,
                                            smoothing_window=smoothing_window,
                                            verbose=verbose,
                                            out=out)

        # figure out all the places where crossover can occur.

        n_residues = cc_dict[cc_dict.keys()[0]].size()

        crossover_dict = get_crossover_dict(
            n_residues=n_residues,
            hierarchy=ph,
            crossover_atom=crossover_atom,
            dist_max=dist_max,
            minimum_matching_atoms=minimum_matching_atoms,
            verbose=verbose,
            out=out)

        # Now we are ready to identify the best composite model...
        # A composite has reside 0 from model x, residue 1 from model y etc.
        # Each change from model a to model b between residues i and i+1 must have
        #  a crossover between a and b at either residue i or i+1

        keys = cc_dict.keys()
        keys.sort()

        sorted_working_model_list = []
        for key in keys:
            working_model = model_object(
                source_id=key,
                cc_dict=cc_dict,
                smoothed_cc_dict=smoothed_cc_dict,
                crossover_dict=crossover_dict,
                minimum_length=minimum_length,
                minimum_improvement=minimum_improvement,
                max_regions_to_test=max_regions_to_test,
                max_ends_per_region=max_ends_per_region,
                maximum_fraction=maximum_fraction)
            if verbose:
                working_model.show_summary(out=out)
            sorted_working_model_list.append(
                [working_model.get_score(), working_model])
        sorted_working_model_list.sort()
        sorted_working_model_list.reverse()
        sorted_working_model_list=\
           sorted_working_model_list[:max_keep]
        working_model_list = []
        for s, m in sorted_working_model_list:
            working_model_list.append(m)

        # Go through all the working models and cross them with other models to
        #  optimize...Then take all the best and cross...

        best_score, best_model = sorted_working_model_list[0]
        found = True
        cycle = 0
        while found:
            cycle += 1
            print >> out, "\nCYCLE %d current best is %7.3f\n" % (
                cycle, best_model.get_score())
            found = False
            sorted_working_model_list = []
            new_best = best_model
            id = 0
            for working_model in working_model_list:
                id += 1
                others = []
                for m in working_model_list:
                    if not working_model == m: others.append(m)
                new_working_model = working_model.optimize_with_others(
                    others=others)
                if not new_working_model:
                    print
                    continue
                aa = [new_working_model.get_score(), new_working_model]
                if not aa in sorted_working_model_list:
                    sorted_working_model_list.append(aa)
            if not sorted_working_model_list:
                break  # nothing to do

            sorted_working_model_list.sort()
            sorted_working_model_list.reverse()
            sorted_working_model_list = sorted_working_model_list[:max_keep]

            new_working_score, new_working_model = sorted_working_model_list[0]
            if new_working_score > best_model.get_score():
                best_model = new_working_model
                found = True
                if verbose:
                    print >> out, "NEW BEST SCORE: %7.2f" % (
                        best_model.get_score())
                    best_model.show_summary(out=out)

        print >> out, "\nDONE... best is %7.3f\n" % (best_model.get_score())

        # Create composite of this chain

        # Note residue values. We are going to pick each residue from one of
        # the models

        for model in ph.models():
            for chain in model.chains():
                if chain.id != chain_id: continue
                residue_list = []
                for rg in chain.residue_groups():
                    residue_list.append(rg.resseq)
        residue_list.sort()
        assert len(best_model.source_list) == len(residue_list)

        for i in xrange(len(residue_list)):
            atom_selection = get_atom_selection(
                model_id=best_model.source_list[i], resseq_sel=residue_list[i])
            asc = ph.atom_selection_cache()
            sel = asc.selection(string=atom_selection)
            sel_hierarchy = ph.select(sel)
            print >> composite_model_stream, remove_ter(
                sel_hierarchy.as_pdb_string())

    #  All done, make a new pdb_hierarchy
    pdb_string = composite_model_stream.getvalue()
    pdb_inp = iotbx.pdb.input(source_info=None, lines=pdb_string)
    pdb_hierarchy = pdb_inp.construct_hierarchy()

    if pdb_out:
        f = open(pdb_out, 'w')
        print >> f, pdb_hierarchy.as_pdb_string(
            crystal_symmetry=crystal_symmetry)
        print "Final model is in: %s\n" % (f.name)
        f.close()

    return pdb_hierarchy
예제 #49
0
class CacheInserter(object):
    """
    Bulk insert bunches of Django objects by converting them in strings
    and by using COPY FROM.
    """
    # Weak registry of every live inserter, so flushall() can reach them
    # all without keeping any of them alive.
    instances = weakref.WeakSet()

    @classmethod
    def flushall(cls):
        """
        Flush the caches of all the instances of CacheInserter.
        """
        for inserter in cls.instances:
            inserter.flush()

    @classmethod
    def saveall(cls, objects):
        """
        Save a sequence of Django objects in the database in a single
        transaction, by using a COPY FROM. Returns the ids of the inserted
        objects.

        NOTE(review): assumes every element of `objects` has the same model
        class as objects[0] -- confirm with callers.
        """
        self = cls(objects[0].__class__, 1000)
        curs = connections[self.alias].cursor()
        # Postgres default sequence name for a serial primary key.
        seq = self.tname.replace('"', '') + '_id_seq'
        with transaction.commit_on_success(using=self.alias):
            # Pre-reserve one id per object so each COPY line can carry an
            # explicit primary key.
            curs.execute("select nextval('%s') "
                         "from generate_series(1, %d)" % (seq, len(objects)))
            ids = [row[0] for row in curs.fetchall()]
            buf = StringIO()
            for pk, obj in zip(ids, objects):
                buf.write('%d\t%s\n' % (pk, self.to_line(obj)))
            buf.reset()  # rewind so COPY reads from the beginning
            curs.copy_from(buf, self.tname)
            buf.close()
        return ids

    def __init__(self, dj_model, max_cache_size):
        """
        :param dj_model: the Django model class whose rows will be inserted
        :param max_cache_size: pending-line count that triggers a flush
        """
        self.table = dj_model
        self.max_cache_size = max_cache_size
        self.alias = router.db_for_write(dj_model)
        meta = dj_model._meta
        self.tname = '"%s"' % meta.db_table
        # Django internals: populate the ordered field cache for the model.
        meta._fill_fields_cache()
        # skip the first field, the id
        self.fields = [f.attname for f in meta._field_name_cache[1:]]
        self.nlines = 0  # pending (unflushed) lines
        self.stringio = StringIO()  # buffer of tab-separated COPY lines
        self.instances.add(self)

    def add(self, obj):
        """
        :param obj: a Django model object

        Append an object to the list of objects to save. If the list exceeds
        the max_cache_size, flush it on the database.
        """
        assert isinstance(obj, self.table), 'Expected instance of %s, got %r' \
            % (self.table.__name__, obj)
        self.stringio.write(self.to_line(obj) + '\n')
        self.nlines += 1
        if self.nlines >= self.max_cache_size:
            self.flush()

    def flush(self):
        """
        Save the pending objects on the database with a COPY FROM.
        """
        if not self.nlines:
            return

        # Replay the accumulated buffer through COPY FROM, then start a
        # fresh buffer for the next batch.
        with transaction.commit_on_success(using=self.alias):
            curs = connections[self.alias].cursor()
            self.stringio.reset()
            curs.copy_from(self.stringio, self.tname, columns=self.fields)
            self.stringio.close()
            self.stringio = StringIO()

        ## TODO: should we add an assert that the number of rows stored
        ## in the db is the expected one? I (MS) have seen a case where
        ## this fails silently (it was for True/False not converted in t/f)

        LOGGER.debug('saved %d rows in %s', self.nlines, self.tname)
        self.nlines = 0

    def to_line(self, obj):
        """
        Convert the fields of a Django object into a line string suitable
        for import via COPY FROM. The encoding is UTF8.
        """
        cols = []
        for fname in self.fields:
            value = getattr(obj, fname)
            if value is None:
                cols.append(r'\N')  # COPY representation of NULL
            elif isinstance(value, bool):
                cols.append('t' if value else 'f')  # Postgres bool literals
            elif isinstance(value, Point):
                # EWKT with an explicit SRID prefix
                cols.append('SRID=4326;' + value.wkt)
            elif isinstance(value, GeometryField):
                cols.append(value.wkt())
            elif isinstance(value, (tuple, list)):
                # for numeric arrays; this is fragile
                cols.append(self.array_to_pgstring(value))
            else:
                # Python 2: coerce everything else to UTF-8 bytes
                cols.append(unicode(value).encode('utf8'))
        return '\t'.join(cols)

    @staticmethod
    def array_to_pgstring(a):
        """
        Convert a Python list/array into the Postgres string-representation
        of it.
        """
        items = []
        for elem in a:
            text = str(elem)
            if text.endswith('L'):  # strip the trailing "L"
                text = text[:-1]  # Python 2 longs repr with an 'L' suffix
            items.append(text)
        return '{%s}' % ','.join(items)
예제 #50
0
class PSPParser:
	""" The PSPParser class does the actual sniffing through the input file looking for anything we're interested in.
	Basically, it starts by looking at the code looking for a '<' symbol. It looks at the code by working with a PSPReader
	object, which handle the current location in the code. When it finds one, it calls a list of functions, the xxxChecks,
	asking each if it recognizes the characters as its kind of input.  When the check functions look at the characters,
	if they want it, they go ahead and gobble it up and set up to create it in the Servlet when the time comes.  When they
	return, they return true if they accepted the character, and the PSPReader object cursor is positioned past the end of
	the block that the check function accepted."""

	# Class-level registry of check functions, filled in while the class
	# body executes (see the checklist.append(...) calls below).  The
	# functions are stored unbound and called as checkfunc(self, handler,
	# reader) from parse(); the list is shared by all PSPParser instances.
	checklist=[]

	def __init__(self,ctxt):
		"""Grab the reader/writer from the context and reset the raw-HTML buffer and markers."""

		self._reader = ctxt.getReader()
		self._writer = ctxt.getServletWriter()
		self._handler = None

		self.cout = StringIO() #This is where we dump straight HTML code that none of the checks want
		self.tmplStart=0 #marks the start of HTML code
		self.tmplStop = 0 #marks the end of HTML code
		self.currentFile = self._reader.Mark().getFile()

	def setEventHandler(self,handler):
		"""Set the handler this parser will use when it finds psp code."""
		self._handler = handler

	def flushCharData(self, start, stop):
		"""Dump all the HTML that we've accumulated over to the character data handler in the event handler object."""
		data = self.cout.getvalue()
		self.cout.close()
		if len(data) > 0: # make sure there's something there
			self._handler.handleCharData(start, stop, data)
		self.cout = StringIO() # fresh buffer; the old one was closed above

	def commentCheck(self, handler, reader):
		"""Comments just get eaten"""
		OPEN_COMMENT = '<%--'
		CLOSE_COMMENT = '--%>'

		if reader.Matches(OPEN_COMMENT):
			reader.Advance(len(OPEN_COMMENT))
			start = reader.Mark()
			stop = reader.skipUntil(CLOSE_COMMENT)
			if stop == None:
				# NOTE(review): raising a string only works on very old
				# Python 2 (TypeError on 2.6+); should be an exception class.
				raise 'ParseException'

			self.flushCharData(self.tmplStart, self.tmplStop)
			return 1
		return 0

	checklist.append(commentCheck) #add this checker to the list that the parse function will call


	def checkExpression(self, handler, reader):
		""" Look for "expressions" and handle them"""

		OPEN_EXPR = '<%='
		CLOSE_EXPR = '%>'
		end_open = None
		attrs=None

		if not reader.Matches(OPEN_EXPR):
			return 0

		reader.Advance(len(OPEN_EXPR)) #eat the opening tag

		# NOTE(review): end_open is always None here, so this branch is dead
		# code; it also calls reader.matches() (lower case), probably a typo
		# for reader.Matches().
		if end_open != None:
			attrs = reader.parseTagAttributes()
			reader.skipSpaces()
			if not reader.matches(end_open):
				raise 'ParseException'
			reader.Advance(len(end_open))
			reader.skipSpaces()
		#below not implemented
		#PSPUtil.checkAttrs('Expression',attrs,validAttrs)

		reader.peekChar()
		reader.skipSpaces()
		start = reader.Mark()
		stop = reader.skipUntil(CLOSE_EXPR)
		if stop == None:
			# NOTE(review): string exception, and spelled 'ParserException'
			# unlike the 'ParseException' used elsewhere.
			raise 'ParserException'
		handler.setTemplateInfo(self.tmplStart, self.tmplStop)
		handler.handleExpression(start, stop, attrs)
		return 1

	checklist.append(checkExpression)



	def checkDirective(self, handler, reader):
		""" Check for directives.  I support two right now, page and include."""
		validDirectives = ['page','include']
		OPEN_DIRECTIVE = r'<%@'
		CLOSE_DIRECTIVE = r'%>'

		if reader.Matches(OPEN_DIRECTIVE):
			opening = OPEN_DIRECTIVE
			close = CLOSE_DIRECTIVE
		else:
			return 0
		start = reader.Mark()

		reader.Advance(len(OPEN_DIRECTIVE))

		#figure out which of the valid directives this is
		match = None
		reader.skipSpaces()
		for i in validDirectives:
			if reader.Matches(i):
				match = i
				break

		if match == None:
			raise 'Invalid Directive'

		reader.Advance(len(match))

		#parse the directive attr:val pair dictionary
		attrs = reader.parseTagAttributes()
##		not checking for validity yet
##		if match == 'page':
##			PSPUtils.checkAttributes('Page Directive', attrs, pageDvalidAttrs)
##		elif match == 'include':
##			PSPUtils.checkAttributes('Include Directive', attrs, includeDvalidAttrs)
##		elif match == 'taglib':
##			raise 'Not Implemented Error'

		#match close
		reader.skipSpaces() #skip to where we expect a close tag
		if not reader.Matches(close):
			raise 'Unterminated directive error'
		else:
			reader.Advance(len(close)) #advance past it
		stop = reader.Mark()

		handler.setTemplateInfo(self.tmplStart, self.tmplStop)
		handler.handleDirective(match, start, stop, attrs)

		return 1

	checklist.append(checkDirective)


	def checkEndBlock(self, handler, reader):
		"""Check for the special '<% end %>' marker that closes an open script block."""
		OPEN_SCRIPT='<%'
		CLOSE_SCRIPT='%>'
		CLOSE_SCRIPT2='$%>'
		CENTER_SCRIPT='end'
		start=reader.Mark()

		if reader.Matches(OPEN_SCRIPT):
			reader.Advance(len(OPEN_SCRIPT))
			reader.skipSpaces()
			if reader.Matches(CENTER_SCRIPT):
				reader.Advance(len(CENTER_SCRIPT))
				reader.skipSpaces()
				if reader.Matches(CLOSE_SCRIPT):
					reader.Advance(len(CLOSE_SCRIPT))
					handler.setTemplateInfo(self.tmplStart, self.tmplStop)
					handler.handleEndBlock()
					return 1
				if reader.Matches(CLOSE_SCRIPT2):
					reader.Advance(len(CLOSE_SCRIPT2))
					handler.setTemplateInfo(self.tmplStart, self.tmplStop)
					handler.handleEndBlock()
					print ">>>>Putting a $ at the end of an end tag does nothing, I Say"
					return 1
		#that wasn't it
		reader.reset(start) # rewind; we consumed nothing usable
		return 0

	checklist.append(checkEndBlock)




	def checkScript(self, handler, reader):
		""" The main thing we're after.  Check for embedded scripts"""
		OPEN_SCRIPT = '<%'
		CLOSE_SCRIPT = '%>'
		attrs=None

		end_open = None

		if reader.Matches(OPEN_SCRIPT):
			open = OPEN_SCRIPT
			close = CLOSE_SCRIPT
		else:
			return 0

		reader.Advance(len(open))#Matches advances it

		# NOTE(review): end_open is always None, so this attribute-parsing
		# branch is dead code (PSPUtils and ValidAttributes are not defined
		# in this scope either).
		if end_open != None:
			attrs = reader.parseTagAttributes()

			reader.skipSpaces()
			if not reader.Matches(end_open):
				raise 'Unterminated script'
			reader.Advance(len(end_open))
			reader.skipSpaces()
			PSPUtils.checkAttributes('Script', attrs, ValidAttributes)
		#reader.skipSpaces() #don't skip as spaces may be significant, leave this for the generator
		start = reader.Mark()
		try:
			stop = reader.skipUntil(close)
		except EOFError:
			raise EOFError("Reached EOF while looking for ending script tag")
		if stop == None:
			raise 'Unterminated Script'
		handler.setTemplateInfo(self.tmplStart, self.tmplStop)
		handler.handleScript(start, stop, attrs)
		return 1

	checklist.append(checkScript)



	def checkMethod(self, handler, reader):
		""" Check for class methods defined in the page. I only support one format for these,
		<psp:method name="xxx" params="xxx,xxx"> Then the function BODY, then <psp:method> """
		OPEN_METHOD='<psp:method'
		CLOSE_METHOD='/>'
		CLOSE_METHOD_2='</psp:method>'
		CLOSE_METHOD_3='>'

		attrs=None

		validAttributes=('name','params')

		if reader.Matches(OPEN_METHOD):
			start = reader.Mark()
			reader.Advance(len(OPEN_METHOD))
			attrs=reader.parseTagAttributes()
			#PSPUtils.checkAttributes('method',attrs,validAttributes)
			reader.skipSpaces()
			if not reader.Matches(CLOSE_METHOD_3):
				raise 'Expected method declaration close'
			reader.Advance(len(CLOSE_METHOD_3))
			stop = reader.Mark()
			handler.setTemplateInfo(self.tmplStart, self.tmplStop)
			handler.handleMethod(start, stop, attrs)
			start=stop
			stop=reader.skipUntil(CLOSE_METHOD_2) #skip past the close marker, return the point before the close marker
			handler.handleMethodEnd(start, stop, attrs)
			return 1
		return 0

	checklist.append(checkMethod)



	def checkInclude(self, handler, reader):
		"""
		Check for inserting another pages output in this spot.
		"""

		OPEN_INCLUDE = '<psp:include'
		CLOSE_INCLUDE_NO_BODY = "/>"
		CLOSE_INCLUDE_BODY = ">"
		CLOSE_INCLUDE = "</psp:include>"
		OPEN_INDIVIDUAL_PARAM = "<psp:param"
		CLOSE_INDIVIDUAL_PARAM = "/>"

		if reader.Matches(OPEN_INCLUDE):
			param={} # psp:param support not implemented; always empty
			start = reader.Mark()
			reader.Advance(len(OPEN_INCLUDE))
			reader.skipSpaces()
			attrs = reader.parseTagAttributes()
			#PSPUtils.checkTagAttributes()....
			reader.skipSpaces()
			if not reader.Matches(CLOSE_INCLUDE_BODY):
				raise "Include bodies not implemented"
			reader.Advance(len(CLOSE_INCLUDE_BODY))
			stop = reader.Mark()
			handler.setTemplateInfo(self.tmplStart, self.tmplStop)
			handler.handleInclude(attrs, param)
			return 1
		return 0

	checklist.append(checkInclude)



	def checkInsert(self, handler, reader):
		"""Check for straight character dumps.  No big hurry for this.  It's almost the same as
		as the page include directive.  This is only a partial implementation of what JSP does.
		JSP can pull it from another server, servlet, JSP page, etc."""

		OPEN_INSERT = '<psp:insert'
		CLOSE_INSERT_NO_BODY = "/>"
		CLOSE_INSERT_BODY = ">"
		CLOSE_INSERT = "</psp:insert>"
		OPEN_INDIVIDUAL_PARAM = "<psp:param"
		CLOSE_INDIVIDUAL_PARAM = "/>"

		if reader.Matches(OPEN_INSERT):
			param={} # psp:param support not implemented; always empty
			start = reader.Mark()
			reader.Advance(len(OPEN_INSERT))
			reader.skipSpaces()
			attrs = reader.parseTagAttributes()
			#PSPUtils.checkTagAttributes()....
			reader.skipSpaces()
			if not reader.Matches(CLOSE_INSERT_BODY):
				raise "Insert bodies not implemented"
			reader.Advance(len(CLOSE_INSERT_BODY))
			stop = reader.Mark()
			handler.setTemplateInfo(self.tmplStart, self.tmplStop)
			handler.handleInsert(attrs, param)
			return 1
		return 0

	checklist.append(checkInsert)


	def parse(self, until=None, accept=None):
		""" Parse the PSP file"""
		noPspElement = 0
		reader = self._reader
		handler = self._handler

		while reader.hasMoreInput():

			#This is for XML style blocks, which I'm not handling yet
			if until !=None and reader.Matches(until):
				return

			#If the file the reader is working on has changed due to a push or pop,
			#flush any char data from the old file
			if not reader.Mark().getFile() == self.currentFile:
				self.flushCharData(self.tmplStart, self.tmplStop)
				self.currentFile = reader.Mark().getFile()
				self.tmplStart = reader.Mark()

		##in JSP, this is an array of valid tag type to check for, I'm not using it now,
		## and I don't think JSP does either
			if accept:
				pass

			accepted = 0

			# give each registered check function a crack at the input; the
			# first one that recognizes (and consumes) it wins
			for checkfunc in self.checklist:
				if checkfunc(self, handler, reader):
					accepted = 1
					noPspElement = 0
					break

			if not accepted:
				if not noPspElement:
					self.tmplStart = reader.Mark()
					noPspElement = 1
				st = reader.nextContent() #skip till the next possible tag
				self.tmplStop = reader.Mark() #mark the end of HTML data
				self.cout.write(st) #write out the raw HTML data

			# NOTE(review): this flush runs on every iteration of the while
			# loop, not once after it; the trailing comment suggests it may
			# have been meant to sit outside the loop -- confirm.
			self.flushCharData(self.tmplStart, self.tmplStop) #dump remaining raw HTML
예제 #51
0
    def Print_to_excel(self):
        context = self._context
        filename = 'Libro_IVA_Compras.xls'
        workbook = xlwt.Workbook(encoding="UTF-8")
        worksheet = workbook.add_sheet('Detalle')
        #style = xlwt.easyxf('font:height 400, bold True, name Arial; align: horiz center, vert center;borders: top medium,right medium,bottom medium,left medium')
        #worksheet.write_merge(0,1,0,7,'REPORT IN EXCEL',style)

        #data
        vatcode_ids = list(self.vatcode_id)
        date_froms = self.date_from
        date_tos = self.date_to
        domain = [('date', '>=', date_froms), ('date', '<=', date_tos),
                  ('type', '!=', 'out_invoice'), ('type', '!=', 'out_refund'),
                  ('journal_id.use_documents', '=', True),
                  ('state', 'not in', ['draft', 'cancel'])]
        if self.journal_ids:
            domain.append(('journal_id.id', 'in', list(self.journal_ids._ids)))

        invoiceModel = self.env['account.invoice']
        invoices = invoiceModel.search(domain,
                                       order="date_invoice,display_name2")
        #vatarray = self.gettotalsperVAT(invoices)
        vatarray = {}
        new_array = self.get_new_array()
        new_vat_array = self.fill_array(new_array, invoices)
        # Titles
        worksheet.write(0, 0, _('Nombre del Informe: Libro IVA Compras'))
        worksheet.write(1, 0, _('Empresa: ') + self.env.user.company_id.name)
        cuit = self.env.user.company_id.partner_id.main_id_number
        worksheet.write(
            2, 0,
            _('CUIT: ') + cuit[0:2] + '-' + cuit[2:10] + '-' + cuit[10:11])
        worksheet.write(
            4, 0,
            _('Periodo ') + date_froms[8:10] + '-' + date_froms[5:7] + '-' +
            date_froms[0:4] + ':' + date_tos[8:10] + '-' + date_tos[5:7] +
            '-' + date_tos[0:4])

        vattot = OrderedDict()
        vattot['IVA 21%'] = 0.00
        vattot['IVA 10.50%'] = 0.00
        vattot['IVA 27%'] = 0.00
        vattot['IVA 5%'] = 0.00
        vattot['IVA 2.50%'] = 0.00
        vattot['Monotributo'] = 0.00

        #columns
        index = 5
        if self.det_level == 'detailed':
            subindex = 0
            worksheet.write(index, subindex, _('Fecha'))
            subindex += 1
            worksheet.write(index, subindex, _('Tipo Doc'))
            subindex += 1
            worksheet.write(index, subindex, _('Serie'))
            subindex += 1
            worksheet.write(index, subindex, _('Nro. Comp'))
            subindex += 1
            worksheet.write(index, subindex, _('Resp IVA'))
            subindex += 1
            worksheet.write(index, subindex, _('CUIT/CUIL'))
            subindex += 1
            worksheet.write(index, subindex, _('Nombre'))
            subindex += 1
            worksheet.write(index, subindex, _('Neto Gravado'))
            subindex += 1
            for key in vattot:
                worksheet.write(index, subindex, key)
                subindex += 1
            worksheet.write(index, subindex, _('Exento'))
            subindex += 1
            worksheet.write(index, subindex, _('Percepcion IVA'))
            subindex += 1
            worksheet.write(index, subindex, _('Percepcion IIBB'))
            subindex += 1
            worksheet.write(index, subindex, _('Impuestos Internos'))
            subindex += 1
            worksheet.write(index, subindex, _('No Gravado'))
            subindex += 1
            #worksheet.write(0,11,'IVA')
            worksheet.write(index, subindex, _('Total'))
        else:
            subindex = 0
            worksheet.write(index, subindex, _('Fecha'))
            subindex += 1
            worksheet.write(index, subindex, _('Nombre'))
            subindex += 1
            worksheet.write(index, subindex, _('CUIT/CUIL'))
            subindex += 1
            worksheet.write(index, subindex, _('Resp IVA'))
            subindex += 1
            worksheet.write(index, subindex, _('Tipo Doc'))
            subindex += 1
            worksheet.write(index, subindex, _('Serie'))
            subindex += 1
            worksheet.write(index, subindex, _('Nro. Comp'))
            subindex += 1
            worksheet.write(index, subindex, _('Total'))
            subindex += 1
            worksheet.write(index, subindex, _('Neto Gravado'))
            subindex += 1
            worksheet.write(index, subindex, _('No Gravado'))
            subindex += 1
            worksheet.write(index, subindex, _('IVA'))
            subindex += 1
            worksheet.write(index, subindex, _('Percepciones'))
            subindex += 1
            worksheet.write(index, subindex, _('Impuestos Internos'))
            subindex += 1

        index = 7
        camount_untaxed = 0
        gettotpercep = 0
        gettotgrossincome = 0
        gettotexempt = 0
        gettotnovat = 0
        camount_total = 0
        gettotinttaxes = 0
        tot1 = tot2 = tot3 = tot4 = tot5 = tot6 = 0

        matrix = {}
        matrixbase = {}
        vatcodes = {}
        vatcodesbase = {}
        tmp = 0.0
        for o in invoices:
            if o.journal_id.use_documents and o.validated_inv(self):
                if o.document_number:
                    if len(o.document_number) == 13:
                        nr_ = "0" + str(o.document_number)
                    else:
                        nr_ = str(o.document_number)
                else:
                    nr_ = ""
                subindex = 0
                if self.det_level == 'detailed':
                    worksheet.write(index, subindex, o.date_invoice)
                    subindex += 1

                    worksheet.write(
                        index, subindex, o.journal_document_type_id.
                        document_type_id.report_name)
                    subindex += 1

                    let = o.journal_document_type_id.document_type_id.document_letter_id.name
                    worksheet.write(index, subindex, str(let))  # letra
                    subindex += 1

                    worksheet.write(index, subindex, nr_)  # nro comprob
                    subindex += 1

                    worksheet.write(
                        index, subindex,
                        str(o.partner_id.afip_responsability_type_id.
                            report_code_name))
                    subindex += 1

                    if str(o.partner_id.main_id_number) == 'False':
                        worksheet.write(index, subindex, ' ')
                    else:
                        cuit = o.partner_id.main_id_number
                        if o.partner_id.main_id_category_id.code != 'DNI':
                            worksheet.write(
                                index, subindex, cuit[0:2] + '-' + cuit[2:10] +
                                '-' + cuit[10:11])
                        else:
                            worksheet.write(index, subindex, cuit)
                    subindex += 1

                    worksheet.write(index, subindex, o.partner_id.name)
                    subindex += 1

                    #tot = o.camount_untaxed()
                    tot = new_vat_array[o.id]['nett']
                    worksheet.write(index, subindex, tot)
                    camount_untaxed += tot
                    subindex += 1

                    #worksheet.write(index, 11, o.gettotvat())
                    #vatarray = self.gettotalsperVAT(o)
                    for key in vattot:
                        worksheet.write(index, subindex,
                                        new_vat_array[o.id][key])
                        vattot[key] += new_vat_array[o.id][key]
                        subindex += 1

                    #tot = o.gettotexempt()
                    tot = new_vat_array[o.id]['exempt']
                    worksheet.write(index, subindex, tot)
                    gettotexempt += tot
                    subindex += 1

                    #tot = o.gettotpercep()
                    tot = new_vat_array[o.id]['perception']
                    worksheet.write(index, subindex, tot)
                    gettotpercep += tot
                    subindex += 1

                    #tot = o.gettotgrossincome()
                    tot = new_vat_array[o.id]['grossincome']
                    worksheet.write(index, subindex, tot)
                    gettotgrossincome += tot
                    subindex += 1

                    #tot = o.gettotinttaxes()
                    tot = new_vat_array[o.id]['other']
                    worksheet.write(index, subindex, tot)
                    gettotinttaxes += tot
                    subindex += 1

                    #tot = o.gettotnovat()
                    tot = new_vat_array[o.id]['novat']
                    worksheet.write(index, subindex, tot)
                    gettotnovat += tot
                    subindex += 1

                    tot = o.camount_total()
                    worksheet.write(index, subindex, tot)
                    camount_total += tot
                    index += 1

                    #Matrix for vat totals grouped by document_type_id
                    for vat in o.tax_line_ids:
                        if vat.tax_id.vatreport_included:
                            amount = float(
                                MultiplybyRate(o.currency_rate, vat.amount,
                                               o.company_currency_id,
                                               o.currency_id))
                            base = float(
                                MultiplybyRate(o.currency_rate, vat.base,
                                               o.company_currency_id,
                                               o.currency_id))
                            name_key = o.partner_id.afip_responsability_type_id.name

                            if vat.name in vatcodes:
                                vatcodes[vat.name] += amount
                            else:
                                vatcodes.update({vat.name: amount})

                            if vat.amount > 0:
                                if o.document_type_id.internal_type == 'credit_note':
                                    monto = -amount
                                else:
                                    monto = amount
                            if vat.amount == 0:
                                if o.document_type_id.internal_type == 'credit_note':
                                    monto = -base
                                else:
                                    monto = base

                            if not name_key in matrix.keys():
                                matrix[name_key] = {vat.name: monto}
                            else:
                                if not vat.name in matrix[name_key].keys():
                                    matrix[name_key].update({vat.name: monto})
                                else:
                                    matrix[name_key][vat.name] += monto

                            if vat.amount > 0:
                                if o.document_type_id.internal_type == 'credit_note':
                                    monto = -base
                                else:
                                    monto = base
                            if vat.amount == 0:
                                if o.document_type_id.internal_type == 'credit_note':
                                    monto = -amount
                                else:
                                    monto = amount

                            if not name_key in matrixbase.keys():
                                matrixbase[name_key] = {vat.name: monto}
                            else:
                                if not vat.name in matrixbase[name_key].keys():
                                    matrixbase[name_key].update(
                                        {vat.name: monto})
                                else:
                                    matrixbase[name_key][vat.name] += monto

                else:
                    worksheet.write(index, subindex, o.date_invoice)
                    subindex += 1

                    worksheet.write(index, subindex, o.partner_id.name)
                    subindex += 1

                    if str(o.partner_id.main_id_number) == 'False':
                        worksheet.write(index, subindex, ' ')
                    else:
                        worksheet.write(index, subindex,
                                        o.partner_id.main_id_number)
                    subindex += 1

                    worksheet.write(
                        index, subindex,
                        str(o.partner_id.afip_responsability_type_id.
                            report_code_name))
                    subindex += 1

                    worksheet.write(
                        index, subindex, o.journal_document_type_id.
                        document_type_id.report_name)
                    subindex += 1

                    let = o.journal_document_type_id.document_type_id.document_letter_id.name
                    worksheet.write(index, subindex, str(let))  # letra
                    subindex += 1

                    worksheet.write(index, subindex, nr_)  # nro comprob
                    subindex += 1

                    tot = 0.0
                    if o.document_type_id.internal_type == 'credit_note':
                        tot = -o.camount_total()
                    else:
                        tot = o.camount_total()
                    tot1 += tot
                    worksheet.write(index, subindex, str(tot))
                    subindex += 1

                    worksheet.write(index, subindex,
                                    str(new_vat_array[o.id]['nett']))
                    tot2 += o.camount_untaxed()
                    subindex += 1

                    worksheet.write(
                        index, subindex,
                        str(new_vat_array[o.id]['exempt'] +
                            new_vat_array[o.id]['novat']))
                    tot3 += new_vat_array[o.id]['exempt'] + new_vat_array[
                        o.id]['novat']
                    subindex += 1

                    tot = 0.0
                    #vatarray = self.gettotalsperVAT(o)
                    for key in vattot:
                        tot += new_vat_array[o.id][key]
                    tot4 += tot
                    worksheet.write(index, subindex, str(tot))
                    subindex += 1

                    tot = new_vat_array[o.id]['perception'] + new_vat_array[
                        o.id]['grossincome']
                    tot5 += tot
                    worksheet.write(index, subindex, tot)
                    subindex += 1

                    tot = new_vat_array[o.id]['other']
                    tot6 += tot
                    worksheet.write(index, subindex, tot)
                    subindex += 1
                    index += 1

        if self.det_level == 'detailed':
            worksheet.write(index, 0, _("Totales"))
            subindex = 7
            worksheet.write(index, subindex, camount_untaxed)
            subindex += 1
            for key in vattot:
                worksheet.write(index, subindex, vattot[key])
                subindex += 1
            worksheet.write(index, subindex, gettotexempt)
            subindex += 1
            worksheet.write(index, subindex, gettotpercep)
            subindex += 1
            worksheet.write(index, subindex, gettotgrossincome)
            subindex += 1
            worksheet.write(index, subindex, gettotinttaxes)
            subindex += 1
            worksheet.write(index, subindex, gettotnovat)
            subindex += 1
            worksheet.write(index, subindex, camount_total)

            index += 2
            subindex = 0
            if_base = dict()
            worksheet.write(index, subindex, _("Totales Agrupados"))
            subindex += 1
            for code in vatcodes:
                foundf = False
                for type in matrix:
                    for key, value in matrix[type].iteritems():
                        if key == code:
                            if matrixbase[type][key] > 0:
                                foundf = True
                                if_base[key] = True
                if foundf:
                    worksheet.write(index, subindex, _("Base"))
                    subindex += 1
                worksheet.write(index, subindex, code)
                subindex += 1

            # print matrix
            totgrp = 0
            for type in matrix:
                index += 1
                subindex = 0
                worksheet.write(index, subindex, type)
                subindex += 1
                for code in vatcodes:
                    foundf = False
                    for key, value in matrix[type].iteritems():
                        if key == code:
                            foundf = True
                            if key in if_base.keys() and if_base[key] == True:
                                worksheet.write(index, subindex,
                                                matrixbase[type][key])
                                subindex += 1
                            worksheet.write(index, subindex, value)
                            subindex += 1
                            totgrp += (matrixbase[type][key] + value)
                    if not foundf:
                        subindex += 1
                        if code in if_base.keys() and if_base[code] == True:
                            subindex += 1
                worksheet.write(index, subindex, totgrp)
                subindex += 1
                totgrp = 0

            #
            # index += 2
            # subindex = 0
            # worksheet.write(index, subindex, _("Totales Agrupados"))
            # subindex += 1
            # for code in vatcodes:
            #     worksheet.write(index, subindex, _("Base"))
            #     subindex += 1
            #     worksheet.write(index, subindex, code)
            #     subindex += 1
            # worksheet.write(index, subindex, _("Totales"))
            # subindex += 1
            #
            # # print matrix
            # index += 2
            # totgrp = 0
            # for type in matrix:
            #     index += 1
            #     subindex = 0
            #     worksheet.write(index, subindex, type)
            #     subindex += 1
            #     for code in vatcodes:
            #         foundf = False
            #         for key, value in matrix[type].iteritems():
            #             if key == code:
            #                 foundf = True
            #                 worksheet.write(index, subindex, matrixbase[type][key])
            #                 subindex += 1
            #                 worksheet.write(index, subindex, value)
            #                 subindex += 1
            #                 totgrp +=  (matrixbase[type][key] + value)
            #         if not foundf:
            #             subindex += 2
            #     worksheet.write(index, subindex, totgrp)
            #     subindex += 1
            #     totgrp = 0

        else:
            subindex = 7
            worksheet.write(index, subindex, tot1)
            subindex += 1
            worksheet.write(index, subindex, tot2)
            subindex += 1
            worksheet.write(index, subindex, tot3)
            subindex += 1
            worksheet.write(index, subindex, tot4)
            subindex += 1
            worksheet.write(index, subindex, tot5)
            subindex += 1
            worksheet.write(index, subindex, tot6)
            subindex += 1

        fp = StringIO()
        workbook.save(fp)
        export_id = self.env['excel.extended'].create({
            'excel_file':
            base64.encodestring(fp.getvalue()),
            'file_name':
            filename
        }).id
        fp.close()
        return {
            'view_mode': 'form',
            'res_id': export_id,
            'res_model': 'excel.extended',
            'view_type': 'form',
            'type': 'ir.actions.act_window',
            'context': context,
            'target': 'new',
        }
예제 #52
0
    def arba_percep_to_txt(self):
        """Export ARBA perception amounts for posted customer documents.

        Builds a fixed-width text file with one line per invoice/refund in
        the selected date range that carries at least one of the configured
        perception taxes with a positive amount, then returns a window
        action opening a ``sicore.extended`` record holding the
        base64-encoded file.

        :raises Warning: when no perception tax code is configured.
        """
        if not self.percep_code:
            raise Warning(_('Debe Completar al menos un codigo de Percepcion'))

        invoices = self.env['account.invoice']
        domain = [
            ('state', 'in', ['open', 'paid']),
            ('type', 'in', ['out_invoice', 'out_refund']),
            ('date_invoice', '<=', self.date_to),
            ('date_invoice', '>=', self.date_from),
        ]

        invoices = invoices.search(domain, order="date_invoice")

        filename1 = "arba_percepciones.txt"
        context = self._context
        # Accumulated file body (renamed from `str`: don't shadow the builtin).
        out = ""
        for inv in invoices:
            taxes = inv.tax_line_ids.filtered(
                lambda x: x.tax_id.id in list(self.percep_code._ids))
            if any(tax.amount > 0 for tax in taxes):
                # Partner tax id rendered as XX-XXXXXXXX-X.
                out += inv.partner_id.main_id_number[
                    0:2] + "-" + inv.partner_id.main_id_number[
                        2:10] + "-" + inv.partner_id.main_id_number[10:11]
                # Invoice date rendered DD/MM/YYYY (stored as YYYY-MM-DD).
                out += inv.date_invoice[8:10] + '/' + inv.date_invoice[
                    5:7] + '/' + inv.date_invoice[0:4]
                # Single-letter document kind.
                if inv.document_type_id.internal_type == 'invoice':
                    out += "F"
                elif inv.document_type_id.internal_type == 'credit_note':
                    out += "C"
                elif inv.document_type_id.internal_type == 'debit_note':
                    out += "D"
                else:
                    out += "?"
                out += inv.journal_document_type_id.document_type_id.document_letter_id.name
                out += "{:0>5}".format(inv.journal_id.point_of_sale_number)
                out += "{:0>8}".format(inv.document_number[5:])

                # Untaxed amount converted via the invoice rate; refunds get
                # a leading minus sign (one digit narrower to keep width).
                t1 = MultiplybyRate2(inv.currency_rate, inv.amount_untaxed,
                                     inv.company_currency_id, inv.currency_id)
                if inv.type != 'out_refund':
                    out += "{:0>12}".format(t1)
                else:
                    out += "-" + "{:0>11}".format(t1)

                # Total perception amount over the configured tax codes.
                t1 = MultiplybyRate2(inv.currency_rate,
                                     sum(taxes.mapped('amount')),
                                     inv.company_currency_id, inv.currency_id)
                if inv.type != 'out_refund':
                    out += "{:0>11}".format(t1)
                else:
                    out += "-" + "{:0>10}".format(t1)

                out += "{:?>1}".format(self.oper_type)
                out += '\r\n'

        fp = StringIO()
        fp.write(out)
        export_id = self.env['sicore.extended'].create({
            'excel_file':
            base64.encodestring(fp.getvalue()),
            'file_name':
            filename1
        }).id
        fp.close()
        return {
            'view_mode': 'form',
            'res_id': export_id,
            'res_model': 'sicore.extended',
            'view_type': 'form',
            'type': 'ir.actions.act_window',
            'context': context,
            'target': 'new',
        }
예제 #53
0
    def create_excel_data(self, **kwargs):
        """Render the trial balance as an XLSX workbook and return its bytes.

        The sheet direction and all captions switch to Arabic when the
        active language is ``ar_SY``; otherwise an English layout is used.
        """
        self.ensure_one()

        # Assemble the report context the same way the QWeb report does.
        data = {}
        data['ids'] = self.env.context.get('active_ids', [])
        data['model'] = self.env.context.get('active_model', 'ir.ui.menu')
        data['form'] = self.read(
            ['date_from', 'date_to', 'journal_ids', 'target_move'])[0]
        used_context = self._build_contexts(data)
        data['form']['used_context'] = dict(
            used_context, lang=self.env.context.get('lang', 'en_US'))
        data = self.pre_print_report(data)
        display_account = data['form'].get('display_account')

        # Restrict to the caller-selected accounts when launched from
        # account.account; otherwise report on every account.
        if kwargs['act_mod_name'] == 'account.account':
            wanted = [int(tok) for tok in kwargs['act_ids'].split(',')
                      if tok != '']
            accounts = self.env['account.account'].search([('id', 'in', wanted)])
        else:
            accounts = self.env['account.account'].search([])
        account_res = self.env[
            'report.account.report_trialbalance'].with_context(
                data['form'].get('used_context'))._get_accounts(
                    accounts, display_account)

        output = StringIO()
        workbook = xlsxwriter.Workbook(output)
        title_fmt = workbook.add_format()
        title_fmt.set_bold()
        title_fmt.set_font_size(30)
        bold_fmt = workbook.add_format()
        bold_fmt.set_bold()
        amount_fmt = workbook.add_format()
        amount_fmt.set_num_format('#,##0.00')

        worksheet = workbook.add_worksheet()
        if 'lang' in self.env.context and self.env.context.get('lang') == 'ar_SY':
            worksheet.right_to_left()
            title = 'ميزان المراجعة'
            display_caption = 'عرض الحسابات'
            target_caption = 'الحركات المستهدفة'
            headings = ('الكود', 'الحساب', 'مدين', 'دائن', 'الرصيد')
        else:
            title = 'Trial Balance'
            display_caption = 'Display Accounts'
            target_caption = 'Target Moves'
            headings = ('Code', 'Account', 'Debit', 'Credit', 'Balance')

        worksheet.write(0, 0, fields.Datetime.now())
        worksheet.write(2, 0, title, title_fmt)
        worksheet.write(4, 0, display_caption, bold_fmt)
        worksheet.write(5, 0, dict(
            self.fields_get(allfields=['display_account'])
            ['display_account']['selection'])[self.display_account])
        worksheet.write(4, 3, target_caption, bold_fmt)
        worksheet.write(5, 3, dict(
            self.fields_get(allfields=['target_move'])
            ['target_move']['selection'])[self.target_move])
        for col, heading in enumerate(headings):
            worksheet.write(7, col, heading, bold_fmt)

        # One row per account line; amounts carry the monetary format.
        for row, line in enumerate(account_res, start=8):
            worksheet.write(row, 0, line['code'])
            worksheet.write(row, 1, line['name'])
            worksheet.write(row, 2, line['debit'], amount_fmt)
            worksheet.write(row, 3, line['credit'], amount_fmt)
            worksheet.write(row, 4, line['balance'], amount_fmt)

        workbook.close()
        output.seek(0)
        content = output.read()
        output.close()
        return content
예제 #54
0
def repack_text(fn):
    """Re-insert translated strings into an LZ11-compressed MSBT text file.

    Reads ``Japanese/<fn>``, decompresses it, locates the TXT2 (text)
    section, replaces it with a block built from the UTF-16 translation
    file ``cnTEXT/<fn>.txt``, patches the size field in the header, and
    writes the recompressed result to ``import/<fn>``.
    """
    print("Loading %s.txt" % fn)
    # Read the compressed payload; the two leading u32s are the declared
    # decompressed size (stored twice in this container format).
    with open("Japanese/%s" % fn, 'rb') as fs:
        dec_size = struct.unpack("I", fs.read(4))[0]
        dec_size2 = struct.unpack("I", fs.read(4))[0]
        lzdata = fs.read()
    lz = lz11()
    o_buffer = lz.decompress_nlzss(lzdata)
    fp = StringIO(o_buffer)

    cn_file = codecs.open("cnTEXT//%s.txt" % fn, "rb", "utf-16")
    try:
        cnlines = cn_file.readlines()
    finally:
        cn_file.close()
    string_list = makestr(cnlines)

    dest = StringIO()
    dest.write(o_buffer)
    dest.seek(0)
    if not os.path.exists("import"):
        os.makedirs("import")

    # Walk the section headers until the TXT2 block is found.  Each known
    # section is <sig:4><big-endian size:4><padding:8><payload>, padded to
    # a 16-byte boundary.
    currentPos = 0
    pos = 0x20  # sections start right after the 0x20-byte file header
    while pos < len(o_buffer):
        fp.seek(pos)
        sig = fp.read(4)
        if sig == "TXT2":
            currentPos = pos
            break
        if sig in ("LBL1", "ATR1", "TSY1"):
            block_size, = struct.unpack(">I", fp.read(4))
            fp.read(8)  # padding
            fp.read(block_size)
            if block_size % 0x10 != 0:
                fp.read(0x10 - block_size % 0x10)  # 16-byte alignment
            pos = fp.tell()
        else:
            # Unknown signature: previously `pos` was never advanced here,
            # which looped forever.  Bail out instead.
            break

    if currentPos > 0:
        # Drop everything from TXT2 onward and append the rebuilt block.
        dest.seek(currentPos)
        dest.truncate()
        block_data = build_block(currentPos, string_list)
        dest.write(block_data)
        end = dest.tell()
        dest.seek(0x12)
        dest.write(struct.pack(">I", end))  # patch total size in header
    dst_data = dest.getvalue()
    fp.close()
    dest.close()

    with open("import//%s" % fn, "wb") as dest2:
        # Mirror the input container: decompressed size stored twice.
        dest2.write(struct.pack("I", len(dst_data)))
        dest2.write(struct.pack("I", len(dst_data)))
        print("compressing...")
        dest2.write(lz.compress_nlzss(dst_data, 13))
예제 #55
0
def read_sac_zpk(filename=None, file=None, string=None, get_comments=False):
    '''
    Read SAC pole-zero file.

    Exactly one of *filename*, *file* or *string* must be given.  Declared
    pole/zero counts that exceed the listed values are padded with
    zero-valued roots.

    Returns ``(zeros, poles, constant)``, or with ``get_comments=True``
    ``(zeros, poles, constant, comments)``.

    :raises SacPoleZeroError: on malformed or inconsistent input.
    :raises ValueError: when no input source is given.
    '''

    if filename is not None:
        f = open(filename, 'r')
    elif file is not None:
        f = file
    elif string is not None:
        f = StringIO(string)
    else:
        # Previously `f` was left unbound here, raising a confusing
        # NameError further down.
        raise ValueError('one of filename, file or string must be given')

    sects = ('ZEROS', 'POLES', 'CONSTANT')
    sectdata = {'ZEROS': [], 'POLES': []}
    npoles = 0
    nzeros = 0
    constant = 1.0
    atsect = None
    comments = []
    try:
        for iline, line in enumerate(f):
            toks = line.split()
            if len(toks) == 0:
                continue

            # Comment lines start with '*' or '#'.
            if toks[0][0] in '*#':
                comments.append(line)
                continue

            if len(toks) != 2:
                raise SacPoleZeroError(
                    'Expected 2 tokens in line %i of file %s'
                    % (iline + 1, filename))

            lsect = toks[0].upper()
            if lsect in sects:
                atsect = lsect
                sectdata[atsect] = []
                if lsect == 'ZEROS':
                    nzeros = int(toks[1])
                elif lsect == 'POLES':
                    npoles = int(toks[1])
                elif lsect == 'CONSTANT':
                    constant = float(toks[1])
            elif atsect:
                sectdata[atsect].append(complex(float(toks[0]),
                                                float(toks[1])))
    finally:
        # Only close streams we opened ourselves (also on error paths,
        # which previously leaked the file except in one branch).
        if f is not file:
            f.close()

    poles = sectdata['POLES']
    zeros = sectdata['ZEROS']
    npoles_ = len(poles)
    nzeros_ = len(zeros)
    if npoles_ > npoles:
        raise SacPoleZeroError(
            'Expected %i poles but found %i in pole-zero file "%s"' %
            (npoles, npoles_, filename))
    if nzeros_ > nzeros:
        raise SacPoleZeroError(
            'Expected %i zeros but found %i in pole-zero file "%s"' %
            (nzeros, nzeros_, filename))

    # Pad with zero-valued roots up to the declared counts.
    if npoles_ < npoles:
        poles.extend([complex(0.)] * (npoles - npoles_))

    # BUGFIX: this compared nzeros_ against npoles, so zero-padding of the
    # ZEROS section depended on the number of declared poles.
    if nzeros_ < nzeros:
        zeros.extend([complex(0.)] * (nzeros - nzeros_))

    if len(poles) == 0 and len(zeros) == 0:
        raise SacPoleZeroError('No poles and zeros found in file "%s"' %
                               (filename))

    # BUGFIX: these two messages passed (constant, filename) to a format
    # string with a single %s, raising TypeError instead of the intended
    # error.
    if not num.all(num.isfinite(poles)):
        raise SacPoleZeroError(
            'Non-finite pole(s) found in pole-zero file "%s"' %
            (filename,))

    if not num.all(num.isfinite(zeros)):
        raise SacPoleZeroError(
            'Non-finite zero(s) found in pole-zero file "%s"' %
            (filename,))

    if not num.isfinite(constant):
        raise SacPoleZeroError(
            'Invalid constant (%g) found in pole-zero file "%s"' %
            (constant, filename))

    if get_comments:
        return zeros, poles, constant, comments
    else:
        return zeros, poles, constant
예제 #56
0
class OutputTrap:
    """Trap standard output and standard error into StringIO logs.

    Captured text is available as ``<instance>.out`` and ``<instance>.err``;
    the ``summary_*`` methods format it with the configured headers.

    A word of caution: because it blocks messages, using this class can make
    debugging very tricky.  If you are having bizarre problems silently, try
    turning your output traps off for a while.  Constructing with ``debug=1``
    turns actual trapping off while leaving the rest of the API unchanged
    (this has already been a life saver).

    Example::

        # config: trapper with a line of dots as log separator
        # (final '\\n' needed)
        config = OutputTrap('Config', 'Out ', 'Err ', '.' * 80 + '\\n')

        config.trap_all()      # start trapping output
        # ... all output is now logged ...
        config.release_all()   # output back to normal

        print(config.summary())         # everything that got logged
        print(config.out.getvalue())    # raw stdout log
        print(config.err.getvalue())    # raw stderr log
    """

    def __init__(self,
                 name='Generic Output Trap',
                 out_head='Standard Output. ',
                 err_head='Standard Error. ',
                 sum_sep='\n',
                 debug=0,
                 trap_out=0,
                 trap_err=0,
                 quiet_out=0,
                 quiet_err=0):
        self.name = name
        self.out_head = out_head
        self.err_head = err_head
        self.sum_sep = sum_sep
        self.out = StringIO()
        self.err = StringIO()
        # Originals of sys.stdout/sys.stderr while trapping is active.
        self.out_save = None
        self.err_save = None
        self.debug = debug
        self.quiet_out = quiet_out
        self.quiet_err = quiet_err
        if trap_out:
            self.trap_out()
        if trap_err:
            self.trap_err()

    def trap_out(self):
        """Trap and log stdout."""
        if sys.stdout is self.out:
            raise OutputTrapError('You are already trapping stdout.')
        if not self.debug:
            # BUGFIX: was stored in self._out_save while __init__ and the
            # rest of the class used self.out_save.
            self.out_save = sys.stdout
            sys.stdout = self.out

    def release_out(self):
        """Release stdout."""
        if not self.debug:
            if sys.stdout is not self.out:
                raise OutputTrapError('You are not trapping stdout.')
            sys.stdout = self.out_save
            self.out_save = None

    def summary_out(self):
        """Return as a string the log from stdout (headed unless quiet)."""
        out = self.out.getvalue()
        if out:
            if self.quiet_out:
                return out
            else:
                return self.out_head + 'Log by ' + self.name + ':\n' + out
        else:
            return ''

    def flush_out(self):
        """Flush the stdout log. All data held in the log is lost."""
        self.out.close()
        self.out = StringIO()

    def trap_err(self):
        """Trap and log stderr."""
        if sys.stderr is self.err:
            raise OutputTrapError('You are already trapping stderr.')
        if not self.debug:
            # BUGFIX: same _err_save/err_save inconsistency as trap_out.
            self.err_save = sys.stderr
            sys.stderr = self.err

    def release_err(self):
        """Release stderr."""
        if not self.debug:
            if sys.stderr is not self.err:
                raise OutputTrapError('You are not trapping stderr.')
            sys.stderr = self.err_save
            self.err_save = None

    def summary_err(self):
        """Return as a string the log from stderr (headed unless quiet)."""
        err = self.err.getvalue()
        if err:
            if self.quiet_err:
                return err
            else:
                return self.err_head + 'Log by ' + self.name + ':\n' + err
        else:
            return ''

    def flush_err(self):
        """Flush the stderr log. All data held in the log is lost."""
        self.err.close()
        self.err = StringIO()

    def trap_all(self):
        """Trap and log both stdout and stderr.

        Catches and discards OutputTrapError exceptions raised."""
        try:
            self.trap_out()
        except OutputTrapError:
            pass
        try:
            self.trap_err()
        except OutputTrapError:
            pass

    def release_all(self):
        """Release both stdout and stderr.

        Catches and discards OutputTrapError exceptions raised."""
        try:
            self.release_out()
        except OutputTrapError:
            pass
        try:
            self.release_err()
        except OutputTrapError:
            pass

    def summary_all(self):
        """Return as a string the log from stdout and stderr, prepending a
        separator to each (defined in __init__ as sum_sep)."""
        sum = ''
        sout = self.summary_out()
        if sout:
            sum += self.sum_sep + sout
        serr = self.summary_err()
        if serr:
            sum += '\n' + self.sum_sep + serr
        return sum

    def flush_all(self):
        """Flush stdout and stderr"""
        self.flush_out()
        self.flush_err()

    # a few shorthands
    trap = trap_all
    release = release_all
    summary = summary_all
    flush = flush_all
예제 #57
0
    def _get_sync(self, view, shareId, peerId, name, op):
        """Build a MIME email message carrying a p2p 'sync' of a collection.

        With *shareId* the peer, target repository and collection are taken
        from the existing share; otherwise the collection is looked up by
        *name* and a share is resolved for (collection, peer).  Changes
        since the share's local version are serialized to XML, compressed,
        and attached as application/octet-stream.  Returns the message, or
        None for a no-op 'sync' (no changes).  On any error the view is
        cancelled and the exception re-raised; on success the view is
        committed with the share marked established and ack-pending.
        """

        try:
            # Bring the repository view up to date before reading from it.
            view.refresh(None, None, False)

            if shareId is not None:
                # Existing share: derive everything from it.
                share = view[shareId]
                peerId = share.conduit.peerId
                toRepoId = share.repoId
                collection = share.contents
                name = collection.displayName
                uuid = collection.itsUUID
                version = share.localVersion
            else:
                # No share yet: look the collection up by name and start
                # from version 0 (send everything).
                collection, name, uuid = self.findCollection(view, name, None)
                toRepoId = None
                version = 0

            replyTo = view[
                self.client.account].imap.replyToAddress.emailAddress
            share = self.findShare(view, collection, toRepoId, peerId)

            changes = self.computeChanges(view, version, collection, share)
            if op == 'sync' and not changes:
                # Nothing to send: just advance the local version.
                share.localVersion = view.itsVersion + 1
                view.commit()
                return None

            message = MIMEMultipart()
            message['From'] = replyTo
            message['Reply-To'] = replyTo
            message['To'] = peerId
            message['Subject'] = 'Chandler sent "%s" collection' % (name)
            # Marker header so the receiving side recognizes p2p traffic.
            message['X-chandler'] = 'p2p'
            textPart = MIMEBase('text', 'plain')
            textPart.set_payload('Chandler sent "%s"' % (name))
            message.attach(textPart)
            attachment = MIMEBase('application', 'octet-stream')

            # Serialize the changed items into a <data> XML tree.
            builder = TreeBuilder()
            dom = ElementTreeDOM()
            data = dom.openElement(builder, 'data')

            # `keys` collects items already exported (exportProcess may add
            # related keys), so each item is emitted at most once.
            keys = set()
            for key, (_changes, status) in changes.iteritems():
                if key not in keys:
                    attrs = {'uuid': key.str64()}
                    if status & CItem.DELETED:
                        attrs['status'] = 'deleted'
                        item = dom.openElement(data, 'item', **attrs)
                    else:
                        if key in collection:
                            attrs['status'] = 'member'
                        item = dom.openElement(data, 'item', **attrs)
                        share.format.exportProcess(dom, key, item, changes,
                                                   keys)
                    dom.closeElement(data, 'item')
                elif key in collection:
                    # Already exported as a related item; just record
                    # membership.
                    item = dom.openElement(data,
                                           'item',
                                           uuid=key.str64(),
                                           status='member')
                    dom.closeElement(data, 'item')

            dom.closeElement(builder, 'data')
            out = StringIO()
            ElementTree(builder.close()).write(out, 'utf-8')
            # Compress the XML payload before attaching it.
            data = compress(out.getvalue())
            out.close()

            # Routing/versioning metadata consumed by the peer's
            # _result_sync handler.
            message['X-chandler-p2p-name'] = name
            message['X-chandler-p2p-from'] = self._repoId.str64()
            if toRepoId is not None:
                message['X-chandler-p2p-to'] = toRepoId.str64()
            message['X-chandler-p2p-item'] = "%s-%d" % (uuid.str64(),
                                                        view.itsVersion)
            message['X-chandler-p2p-op'] = 'sync'

            attachment.set_payload(data)
            encode_base64(attachment)
            attachment.add_header('Content-Disposition',
                                  'attachment',
                                  name=name)
            message.attach(attachment)
        except:
            # Roll the repository view back before propagating the error.
            view.cancel()
            raise

        share.localVersion = view.itsVersion + 1
        share.established = True
        share.ackPending = True
        view.commit()

        return message
예제 #58
0
파일: graph.py 프로젝트: rit/arches
    def get(self, request, graphid, nodeid=None):
        """Handle graph GET actions, dispatched on ``self.action``.

        Supported actions: ``export_graph`` (JSON download),
        ``export_mapping_file`` (zip download), ``get_domain_connections``,
        ``get_related_nodes`` and ``get_valid_domain_nodes`` (JSON
        responses).  Unknown actions return 404.
        """
        if self.action == 'export_graph':
            graph = get_graphs_for_export([graphid])
            graph['metadata'] = system_metadata()
            f = JSONSerializer().serialize(graph, indent=4)
            graph_name = JSONDeserializer().deserialize(f)['graph'][0]['name']

            response = HttpResponse(f, content_type='json/plain')
            response[
                'Content-Disposition'] = 'attachment; filename="%s.json"' % (
                    graph_name)
            return response
        elif self.action == 'export_mapping_file':
            files_for_export = create_mapping_configuration_file(graphid)
            file_name = Graph.objects.get(graphid=graphid).name

            # Renamed from `buffer`/`zip` to avoid shadowing builtins.
            out_buffer = StringIO()
            with zipfile.ZipFile(out_buffer, 'w',
                                 zipfile.ZIP_DEFLATED) as zip_file:
                for f in files_for_export:
                    f['outputfile'].seek(0)
                    zip_file.writestr(f['name'], f['outputfile'].read())
            # The `with` block already closed the archive; the previous
            # extra close()/flush() calls were redundant.
            zip_stream = out_buffer.getvalue()
            out_buffer.close()

            response = HttpResponse()
            response[
                'Content-Disposition'] = 'attachment; filename=' + file_name + '.zip'
            response['Content-length'] = str(len(zip_stream))
            response['Content-Type'] = 'application/zip'
            response.write(zip_stream)
            return response

        elif self.action == 'get_domain_connections':
            res = []
            graph = Graph.objects.get(graphid=graphid)
            ontology_class = request.GET.get('ontology_class', None)
            ret = graph.get_valid_domain_ontology_classes()
            for r in ret:
                res.append({
                    'ontology_property':
                    r['ontology_property'],
                    'ontology_classes':
                    [c for c in r['ontology_classes'] if c == ontology_class]
                })
            return JSONResponse(res)

        else:
            graph = Graph.objects.get(graphid=graphid)
            if self.action == 'get_related_nodes':
                parent_nodeid = request.GET.get('parent_nodeid', None)
                ret = graph.get_valid_ontology_classes(
                    nodeid=nodeid, parent_nodeid=parent_nodeid)

            elif self.action == 'get_valid_domain_nodes':
                ret = graph.get_valid_domain_ontology_classes(nodeid=nodeid)

            else:
                # BUGFIX: an unknown action previously raised NameError on
                # `ret` instead of reaching the 404 below.
                return HttpResponseNotFound()

            return JSONResponse(ret)
예제 #59
0
 def _image(self, node):
     """Draw the image described by *node* onto the report canvas.

     Image bytes come, in order of precedence, from a named in-memory
     image, inline base64 node text, a cached file entry, a URL, or a file
     on the report path.  Explicit width/height attributes are honoured;
     when both are given the image is fitted inside the requested box while
     keeping its aspect ratio.  Returns False when no image data could be
     obtained.
     """
     import urllib
     import urlparse
     from reportlab.lib.utils import ImageReader
     nfile = node.get('file')
     if not nfile:
         if node.get('name'):
             # Image registered under a symbolic name.
             image_data = self.images[node.get('name')]
             self._logger.debug("Image %s used", node.get('name'))
             s = StringIO(image_data)
         else:
             if self.localcontext:
                 # Evaluate template expressions embedded in the node text.
                 res = utils._regex.findall(node.text)
                 for key in res:
                     newtext = eval(key, {}, self.localcontext)
                     node.text = newtext
             image_data = None
             if node.text:
                 image_data = base64.decodestring(node.text)
             if image_data:
                 s = StringIO(image_data)
             else:
                 self._logger.debug("No image data!")
                 return False
     else:
         if nfile in self.images:
             s = StringIO(self.images[nfile])
         else:
             try:
                 up = urlparse.urlparse(str(nfile))
             except ValueError:
                 up = False
             if up and up.scheme:
                 # RFC: do we really want to open external URLs?
                 # Are we safe from cross-site scripting or attacks?
                 self._logger.debug("Retrieve image from %s", nfile)
                 u = urllib.urlopen(str(nfile))
                 s = StringIO(u.read())
             else:
                 self._logger.debug("Open image file %s ", nfile)
                 s = _open_image(nfile, path=self.path)
     try:
         img = ImageReader(s)
         (sx, sy) = img.getSize()
         self._logger.debug("Image is %dx%d", sx, sy)
         args = {'x': 0.0, 'y': 0.0}
         for tag in ('width', 'height', 'x', 'y'):
             if node.get(tag):
                 args[tag] = utils.unit_get(node.get(tag))
         if ('width' in args) and (not 'height' in args):
             args['height'] = sy * args['width'] / sx
         elif ('height' in args) and (not 'width' in args):
             args['width'] = sx * args['height'] / sy
         elif ('width' in args) and ('height' in args):
             # Fit inside the requested box, preserving aspect ratio.
             # BUGFIX: the box ratio was compared against the boolean
             # (float(sx) > sy) instead of the image ratio float(sx) / sy.
             if (float(args['width']) / args['height']) > (float(sx) / sy):
                 args['width'] = sx * args['height'] / sy
             else:
                 args['height'] = sy * args['width'] / sx
         self.canvas.drawImage(img, **args)
     finally:
         s.close()
예제 #60
0
    def _result_sync(self, view, uid, message, fromAddress):
        """Apply an incoming p2p 'sync' email message to the local repository.

        Decodes the compressed XML payload attached by the peer's
        _get_sync, creates the target collection if it does not exist yet,
        imports/deletes items accordingly, commits the view and returns a
        'receipt' MIME message to send back.  On any error the view is
        cancelled and the exception re-raised.
        """

        try:
            # Bring the repository view up to date before applying changes.
            view.refresh(None, None, False)

            # Routing/versioning metadata written by the sending side.
            repoId = UUID(message['X-chandler-p2p-from'])
            uuid, version = message['X-chandler-p2p-item'].split('-')
            uuid = UUID(uuid)
            version = int(version)
            name = message['X-chandler-p2p-name']
            self.client.output("processing '%s'" % (name))

            collection = view.find(uuid)
            if collection is None:
                # First sync of this collection: create it and show it in
                # the sidebar.
                collection = pim.SmartCollection(itsView=view,
                                                 _uuid=uuid,
                                                 displayName=name)
                schema.ns("osaf.app", view).sidebarCollection.add(collection)

                # for now, grant READ to everyone
                acl = ACL()
                acl.append(
                    ACE(schema.ns('p2p', view).all.itsUUID, Permissions.READ))
                collection.setACL(acl, 'p2p')
                isNew = True
            else:
                isNew = False

            share = self.findShare(view, collection, repoId, fromAddress)
            format = share.format

            if isNew:
                share.localVersion = view.itsVersion + 1
            else:
                # Advance the local version only when there is nothing
                # pending to send back.
                changes = self.computeChanges(view, share.localVersion,
                                              collection, share)
                if not changes:
                    share.localVersion = view.itsVersion + 1

            # Second MIME part holds the base64-encoded, compressed XML.
            payload = message.get_payload(1).get_payload()
            dom = ElementTreeDOM()
            input = StringIO(decompress(b64decode(payload)))
            data = ElementTree(file=input).getroot()
            input.close()

            share.remoteVersion = version
            view.deferDelete()

            # Walk the <item> elements: delete, import, or just record
            # collection membership depending on each item's status.
            for itemElement in dom.iterElements(data):
                attributes = dom.getAttributes(itemElement)
                status = attributes.get('status')
                if status == 'deleted':
                    item = view.findUUID(attributes['uuid'])
                    if item is not None:
                        item.delete()
                else:
                    child = dom.getFirstChildElement(itemElement)
                    if child is not None:
                        item = format.importProcess(dom, child)
                    else:
                        item = view.findUUID(attributes['uuid'])

                    if status == 'member':
                        collection.inclusions.add(item)

            # Kludge until masterEvents filter patch on bug 6970 is checked in
            for item in collection.inclusions:
                if pim.has_stamp(item, pim.EventStamp):
                    event = pim.EventStamp(item)
                    if event.rruleset is not None:
                        event.getMaster().getFirstOccurrence()

        except:
            # Roll the repository view back before propagating the error.
            view.cancel()
            raise

        share.established = True
        view.commit()
        self.client.output("'%s' synchronized" % (collection.displayName))

        replyTo = view[self.client.account].imap.replyToAddress.emailAddress

        # Acknowledge the sync with a small receipt message for the sender.
        receipt = MIMEText('Chandler sent a receipt for "%s"' % (name))
        receipt['From'] = replyTo
        receipt['Reply-To'] = replyTo
        receipt['To'] = message.get('replyTo') or fromAddress
        receipt['Subject'] = "Chandler sent a receipt"
        receipt['X-chandler'] = 'p2p'
        receipt['X-chandler-p2p-name'] = name
        receipt['X-chandler-p2p-from'] = self._repoId.str64()
        receipt['X-chandler-p2p-to'] = repoId.str64()
        receipt['X-chandler-p2p-item'] = "%s-%d" % (uuid.str64(),
                                                    share.localVersion)
        receipt['X-chandler-p2p-op'] = 'receipt'

        return receipt