def batch_import():
    # Requires os, struct and StringIO, plus the module's read_csv,
    # runPNG2DDS and runTexconv helpers.
    fmt = read_csv("format.csv")
    for fn in os.listdir("cnpng"):
        if fn[-4:].lower() in (".tga", ".png"):
            bflim_name, width, height, format_name, swizzle = fmt[fn[:-4]]
            if os.path.exists("tmp\\tmp.dds"):
                os.remove("tmp\\tmp.dds")
            if os.path.exists("tmp\\tmp.gtx"):
                os.remove("tmp\\tmp.gtx")
            runPNG2DDS(format_name, "cnpng\\%s" % fn, "cndds\\%s.dds" % fn)
            runPNG2DDS(format_name, "cnpng\\%s" % fn, "tmp\\tmp.dds")
            runTexconv("tmp\\tmp.dds", "tmp\\tmp.gtx", swizzle)
            # Pull the raw image data out of the converted GTX container.
            fs = open("tmp\\tmp.gtx", "rb")
            fs.seek(0xf0)
            data_len = struct.unpack(">I", fs.read(4))[0]
            print("%08x" % data_len)
            fs.seek(0xfc)
            dataA = fs.read(data_len)
            print("%08x" % len(dataA))
            fs.close()
            # Copy the original BFLIM into an in-memory buffer, then
            # overwrite its image data in place starting at offset 0.
            o_file = open(bflim_name, "rb")
            buffer = StringIO()
            buffer.write(o_file.read())
            o_file.close()
            buffer.seek(0)
            buffer.write(dataA)
            out_dir = "REPACK\\" + "\\".join(bflim_name.split("\\")[1:-1])
            if not os.path.exists(out_dir):
                os.makedirs(out_dir)
            dest = open("REPACK\\" + "\\".join(bflim_name.split("\\")[1:]), "wb")
            dest.write(buffer.getvalue())
            dest.close()
            buffer.flush()
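The repack above works because StringIO writes overwrite bytes in place rather than inserting them. A minimal Python 2 sketch of that pattern, with hypothetical input data:

from StringIO import StringIO

buf = StringIO()
buf.write("OLDHEADERpayload")   # hypothetical original file contents
buf.seek(0)
buf.write("NEWHEADER")          # overwrites the first 9 bytes only
assert buf.getvalue() == "NEWHEADERpayload"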
Example #2
class xtriage_output(printed_output):
    def __init__(self, out):
        super(xtriage_output, self).__init__(out)
        self.gui_output = True
        self._out_orig = self.out
        self.out = StringIO()
        self._sub_header_to_out = {}

    def show_big_header(self, text):
        pass

    def show_header(self, text):
        self._out_orig.write(self.out.getvalue())
        self.out = StringIO()
        super(xtriage_output, self).show_header(text)

    def show_sub_header(self, title):
        self._out_orig.write(self.out.getvalue())
        self.out = StringIO()
        self._current_sub_header = title
        assert title not in self._sub_header_to_out
        self._sub_header_to_out[title] = self.out

    def flush(self):
        self._out_orig.write(self.out.getvalue())
        self.out.flush()
        self._out_orig.flush()
Example #3
def _make_csv_report(q, display_headers, raw_headers, pagetitle, filename,
                     linker=(lambda colid, value: None)):
    def make_record(itm, hdr):
        if isinstance(itm, dict):
            return tuple(itm[h] for h in hdr)
        return tuple(getattr(itm, h) for h in hdr)

    # If a direct csv file is requested
    if request.args.get('csvfile', None) == '1':
        io = StringIO()
        writer = csv.writer(io)
        writer.writerow(raw_headers)
        for itm in q:
            writer.writerow(make_record(itm, raw_headers))
        io.flush()
        io.seek(0, 0)   # important! we want send_file to read from the front
        return send_file(
            io,
            as_attachment=True,
            attachment_filename='%s.csv' % filename
        )

    # Otherwise show the page.
    return render_template(
        'admin.csvdata.html',
        headers=display_headers,
        items=[make_record(itm, raw_headers) for itm in q],
        pagetitle=pagetitle,
        linker=linker,
    )
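For reference, the same pattern as a standalone route. This is only a sketch, assuming the Python 2 era Flask used here, where the keyword is `attachment_filename`; Flask 2 renamed it `download_name` and expects a bytes buffer:

from StringIO import StringIO
import csv
from flask import Flask, send_file

app = Flask(__name__)

@app.route('/report.csv')
def report_csv():
    io = StringIO()
    csv.writer(io).writerows([['a', 'b'], [1, 2]])
    io.seek(0)  # send_file reads from the current position
    return send_file(io, mimetype='text/csv', as_attachment=True,
                     attachment_filename='report.csv')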
Example #4
    def show_image(self, colorbar=False):
        # start xpans if needed
        ds9.ds9_xpans()
        # start ds9 if needed, or connect to existing
        display = ds9.ds9(target='validate')
        if self.frame_number is None:
            # display.set('frame delete all')
            display.set('frame new')
            display.set('scale zscale')
            display.set('cmap invert yes')
            f = StringIO()
            self.hdulist.writeto(f)
            f.flush()
            f.seek(0)
            hdulist = fits.open(f)
            for hdu in hdulist:
                del(hdu.header['PV*'])
            display.set_pyfits(hdulist)
            self.frame_number = display.get('frame frameno')
            display.set('frame center {}'.format(self.frame_number))
            display.set('zoom to fit')
            display.set('wcs align yes')
        display.set('frame frameno {}'.format(self.frame_number))

        self._interaction_context = InteractionContext(self)

        self.number_of_images_displayed += 1
Example #5
    def test_notify_not_tested(self):
        """
        Ensure interested parties get the not tested test notifications
        """
        gpg = self.gpg
        datafile = os.path.join(self.tempdir, 'data')

        io = StringIO()
        print >> io, 'Has 1 result'
        #                      1         2         3         4         5         6         7            # NOQA
        #            01234567890123456789012345678901234567890123456789012345678901234567890123456789   # NOQA
        print >> io, '999X 12345                                      P              NT  N  N'  # NOQA
        io.flush()
        io.seek(0)
        gpg.encrypt_file(io,
                         str(self.key),
                         passphrase=self.passphrase,
                         output=datafile)
        io.close()

        sys.argv = ['', '-c', self.configfile, datafile]
        scripts.parse.main()

        emails = turbomail.interface.manager.transport.get_sent_mails()
        # 2 Emails sent. One for HIV. One for missing draw dates.
        eq_(2, len(emails))
        email_content = emails[0]
        assert 'result updates' in email_content
        assert 'DHIV' in email_content
        assert '999X' in email_content
        assert re.search('Other:\s+1', email_content)
Example #6
    def test_notify_missing_draw_dates(self):
        """
        Ensure interested parties get missing draw date notifications
        """
        gpg = self.gpg
        datafile = os.path.join(self.tempdir, 'data')

        io = StringIO()
        print >> io, 'Has 1 result'
        #                      1         2         3         4         5         6         7            # NOQA
        #            01234567890123456789012345678901234567890123456789012345678901234567890123456789   # NOQA
        print >> io, '999X 12345                                      P               N  N  N'  # NOQA
        io.flush()
        io.seek(0)
        gpg.encrypt_file(io,
                         str(self.key),
                         passphrase=self.passphrase,
                         output=datafile)
        io.close()

        sys.argv = ['', '-c', self.configfile, datafile]
        scripts.parse.main()

        emails = turbomail.interface.manager.transport.get_sent_mails()
        # Only one email should have been sent for missing draw results
        eq_(1, len(emails))
        email_content = emails[0]
        assert 'AEH-Redcross update' in email_content
        assert 'matching draw date' in email_content
        assert 'not entered' in email_content
        assert re.search('received\s+1', email_content)
Example #7
    def test_dry(self):
        """
        Ensure --dry option doesn't affect the file system and database
        """
        config = self.config
        gpg = self.gpg
        datafile = os.path.join(self.tempdir, 'data')

        io = StringIO()
        print >> io, 'Has 1 result'
        print >> io, '076C 12345                                      P'
        io.flush()
        io.seek(0)
        gpg.encrypt_file(io,
                         str(self.key),
                         passphrase=self.passphrase,
                         output=datafile)
        io.close()

        sys.argv = ['', '-c', self.configfile, '--dry', datafile]
        scripts.parse.main()

        eq_(0, Session.query(models.Result).count())
        assert not os.path.exists(
            os.path.join(config.get('settings', 'dir.web'), '76C.html'))
        assert not os.path.exists(
            os.path.join(config.get('settings', 'dir.web'), '76C.xls'))
Example #8
    def zip_response(self,
                     files_for_export,
                     zip_file_name=None,
                     file_type=None):
        '''
        Given a list of export file names, zips up all the files with those names and returns an HTTP response.
        '''
        buffer = StringIO()

        with zipfile.ZipFile(buffer, 'w', zipfile.ZIP_DEFLATED) as zip:
            for f in files_for_export:
                f['outputfile'].seek(0)
                zip.writestr(f['name'], f['outputfile'].read())

        buffer.flush()
        zip_stream = buffer.getvalue()
        buffer.close()

        response = HttpResponse()
        response[
            'Content-Disposition'] = 'attachment; filename=' + zip_file_name
        response['Content-length'] = str(len(zip_stream))
        response['Content-Type'] = 'application/zip'
        response.write(zip_stream)
        return response
Example #9
    def _run_step(self,
                  step_num,
                  step_type,
                  input_path,
                  output_path,
                  working_dir,
                  env,
                  child_stdin=None):
        step = self._get_step(step_num)

        common_args = (['--step-num=%d' % step_num] +
                       self._mr_job_extra_args(local=True))

        if step_type == 'mapper':
            child_args = (['--mapper'] + [input_path] + common_args)
        elif step_type == 'reducer':
            child_args = (['--reducer'] + [input_path] + common_args)
        elif step_type == 'combiner':
            child_args = ['--combiner'] + common_args + ['-']

        child_instance = self._mrjob_cls(args=child_args)

        has_combiner = (step_type == 'mapper' and 'combiner' in step)

        # Use custom stdin
        if has_combiner:
            child_stdout = StringIO()
        else:
            child_stdout = open(output_path, 'w')

        with save_current_environment():
            with save_cwd():
                os.environ.update(env)
                os.chdir(working_dir)

                child_instance.sandbox(stdin=child_stdin, stdout=child_stdout)
                child_instance.execute()

        if has_combiner:
            sorted_lines = sorted(child_stdout.getvalue().splitlines())
            combiner_stdin = StringIO('\n'.join(sorted_lines))
        else:
            child_stdout.flush()

        child_stdout.close()

        while len(self._counters) <= step_num:
            self._counters.append({})
        child_instance.parse_counters(self._counters[step_num])

        if has_combiner:
            self._run_step(step_num,
                           'combiner',
                           None,
                           output_path,
                           working_dir,
                           env,
                           child_stdin=combiner_stdin)

            combiner_stdin.close()
Example #10
    def test_DrawDateSync_ResultHasNoCorrespondingDrawDateInDrawTable_ResultDrawDateIsNone(
            self):
        """
        Result draw dates should be None if there is no corresponding draw date in the Draw table
        Sync code is in parser.py
        """

        site_code = '999X'
        reference_number = '12345'

        gpg = self.gpg
        datafile = os.path.join(self.tempdir, 'data')

        io = StringIO()
        print >> io, 'Has 1 result'
        print >> io, '999X 12345                                      P               N  N  N'  # NOQA
        io.flush()
        io.seek(0)
        gpg.encrypt_file(io,
                         str(self.key),
                         passphrase=self.passphrase,
                         output=datafile)
        io.close()

        sys.argv = ['', '-c', self.configfile, datafile]
        scripts.parse.main()

        result = Session.query(models.Result)\
            .filter(models.Result.site_code == site_code)\
            .filter(models.Result.reference_number == reference_number).first()

        # Is there a draw date?
        assert not result.draw_date
Example #11
    def findContentSoup( self, wiktionaryPage ):
        page = BeautifulSoup( wiktionaryPage )
        bodyContent = page.find( 'div', id='bodyContent' )
        self.pullUpHeadSpanContent( bodyContent )

        buffer = StringIO()
#        buffer.write( '<!DOCTYPE html><html lang="en"><head><meta charset=utf-8></head><body>\n')
        h2s = bodyContent.findAll( 'h2', {'class':'head'} )

        englishHead = h2s[0]
        otherLangHead = None
        if len( h2s ) > 1:
            otherLangHead = h2s[1]

        g = englishHead.nextSiblingGenerator()
        n = g.next()
        while otherLangHead != n and n:
            print n
            buffer.write( str( n ) )
            buffer.write( '\n' )
            n = g.next()
#        buffer.write( '</body></html>' )
        buffer.flush()

        return BeautifulSoup( buffer.getvalue(), fromEncoding='utf-8' )
Example #12
def updateSourceFile(fname):
    fname = os.path.realpath(fname)
    if os.path.splitext(fname)[1] == '.py':
        thedict = {'stack':[],
               'repo':[],
               'unknown':[]}
        fileLines = StringIO()
        with open(fname,'r') as fid:
            lines = fid.read().splitlines()
        quotinBig = False
        quotinSmall = False
        impSection = False
        stacking = True
        for line in lines:
            if line.startswith('"""') and impSection:
                impSection = False
                stacking = True
                continue
            if line.find('"""IMPORTERATOR') != -1:
                impSection = True
                continue
            if line.find('IMPORTERATOR_FROM_REPO') != -1:
                stacking = False
                continue
            if (line.strip().startswith('"""') or line.strip().endswith('"""')) and line.find('IMPORTERATOR') == -1:
                if not quotinBig:
                    quotinBig = True
                else:
                    quotinBig = False
            if line.strip().startswith("'''") or line.strip().endswith("'''"):
                if not quotinSmall:
                    quotinSmall = True
                else:
                    quotinSmall = False
            if line.find('import ') != -1 and not (quotinBig or quotinSmall) and \
                    line.find('import os') == -1 and line.find('import sys') == -1 and line.find('import Importerator') == -1:
                if impSection:
                    if stacking:
                        thedict['stack'].append('{}\n'.format(line))
                    else:
                        thedict['repo'].append('{}\n'.format(line))
                else:
                    thedict['unknown'].append('{}\n'.format(line))
            else:
                fileLines.write('{}\n'.format(line))
        # Rewrite the file only when it was parsed as a .py file above;
        # otherwise thedict/fileLines are undefined and the file would be
        # truncated for nothing.
        with open(fname, 'w') as outfid:
            outfid.write('"""IMPORTERATOR\n')
            for line in thedict['stack']:
                outfid.write(line)
            if thedict['repo']:
                outfid.write('IMPORTERATOR_FROM_REPO\n')
            for line in thedict['repo']:
                outfid.write(line)
            if thedict['unknown']:
                outfid.write('IMPORTERATOR_UNKNOWN\n')
            for line in thedict['unknown']:
                outfid.write(line)
            outfid.write('"""\n')
            fileLines.flush()
            outfid.write(fileLines.getvalue())
Example #13
        def buildCSV(formItem, items):

            items = list(items)
            keys = set()
            map(lambda item: keys.update(item.data.keys()), items)
            keys = ['ctime'] + list(keys)
            
            def genRows():

                # Headers
                yield dict([(k,k) for k in keys])

                # Answer set
                for item in items:
                    rv = {}
                    rv.update( dict([(key,item.data.get(key,'') or '') for key in keys]) )
                    rv['ctime'] = str(item.ctime)
                    yield rv

            buf = StringIO()
            writer = csv.DictWriter(buf, keys)
            writer.writerows(csvutil.encodeDictFilter(genRows()))
            buf.flush()

            from nevow import inevow, static
            filename=str('%s_data.csv'%formItem.name)
            inevow.IRequest(ctx).setHeader('Cache-Control',
                    'no-cache, must-revalidate, no-store')
            inevow.IRequest(ctx).setHeader('Content-disposition',
                    'attachment; filename=%s' % filename)
            return static.Data(buf.getvalue(), mimetypes.guess_type(filename)[0])
Example #14
    def show_image(self, colorbar=False):
        # start xpans if needed
        ds9.ds9_xpans()
        # start ds9 if needed, or connect to existing
        display = ds9.ds9(target='validate')
        if self.frame_number is None:
            # display.set('frame delete all')
            display.set('frame new')
            display.set('scale zscale')
            display.set('cmap invert yes')
            f = StringIO()
            self.hdulist.writeto(f)
            f.flush()
            f.seek(0)
            hdulist = fits.open(f)
            for hdu in hdulist:
                del(hdu.header['PV*'])
            display.set_pyfits(hdulist)
            f.close()
            del(hdulist)
            self.frame_number = display.get('frame frameno')
            display.set('frame center {}'.format(self.frame_number))
            display.set('zoom to fit')
            display.set('wcs align yes')
        display.set('frame frameno {}'.format(self.frame_number))

        self._interaction_context = InteractionContext(self)

        self.number_of_images_displayed += 1
Example #15
    def zip_response(self, shapefile_path, file_name, mimetype, readme=None):
        buffer = StringIO()
        zip = zipfile.ZipFile(buffer, 'w', zipfile.ZIP_DEFLATED)
        files = ['shp', 'shx', 'prj', 'dbf']
        for item in files:
            filename = '%s.%s' % (shapefile_path.replace('.shp', ''), item)
            zip.write(filename,
                      arcname='%s.%s' % (file_name.replace('.shp', ''), item))
        if readme:
            zip.writestr('README.txt', readme)
        for dbf_stream in self.dbf_streams:
            zip.writestr(dbf_stream['name'], dbf_stream['stream'])
        zip.close()
        buffer.flush()
        zip_stream = buffer.getvalue()
        buffer.close()

        # Stick it all in a django HttpResponse
        response = HttpResponse()
        response[
            'Content-Disposition'] = 'attachment; filename=%s.zip' % file_name.replace(
                '.shp', '')
        response['Content-length'] = str(len(zip_stream))
        response['Content-Type'] = mimetype
        response.write(zip_stream)
        return response
Example #16
def report_generic_detailed(self, request, queryset):
    if len(queryset) == 1:
        response = HttpResponse(mimetype="application/pdf")
        response["Content-Disposition"] = "attachment; filename=%s.pdf" % (
            smart_str(queryset.model._meta.verbose_name.upper())
        )
        html = html_report_generic_detailed(get_header_detailed(self, queryset), self.fieldsets_report, queryset[0])
        pdf = pisa.CreatePDF(html, response)
        return response
    else:
        response = HttpResponse(mimetype="application/zip")
        response["Content-Disposition"] = "filename=" + _("Relatorio") + ".zip"
        buffer = StringIO()
        zip = zipfile.ZipFile(buffer, "w", zipfile.ZIP_DEFLATED)

        local = settings.MEDIA_ROOT
        for key, query in enumerate(queryset):
            nome = "%s-%s.pdf" % (remove_sc(smart_str(queryset.model._meta.verbose_name.upper())), key)

            html = html_report_generic_detailed(get_header_detailed(self, queryset), self.fieldsets_report, query)
            arquivo = file(local + nome, "w")
            pdf = pisa.CreatePDF(html, arquivo)
            arquivo.close()
            zip.write(local + nome, nome)
            os.remove(local + nome)
        zip.close()
        buffer.flush()
        ret_zip = buffer.getvalue()
        buffer.close()
        response.write(ret_zip)
        return response
Example #17
def get_record(fasta_file, n):
    """
    Process the n-th record from the fasta file.
    @param n: Index of the record to process from the fasta file. Starts at 1.
    @deprecated: Not based on N records, but N fasta files
    """
    raise Exception("NO MORE")
    i = 0
    i = int(2*n)
    cmd = "head -%i %s" % (i,fasta_file)
    out,err = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).communicate()
    io = StringIO()
    #io.write(out)
    with open(fasta_file) as handle:
        for line in handle:
            i+=1
            if n*2==i or n*2-1==i:
                io.write(line)
            if i>n*2:
                break

    #cmd = "head -%i %s | tail -2" % (i,fasta_file)
    #out,err = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).communicate()
    #_debug(out)
    # Flush and point to beginning of handle for reading
    io.flush()
    io.reset()
    _debug(io.getvalue())
    records = list(SeqIO.parse(io, "fasta"))
    assert len(records)==1
    return records[0]
Example #18
def targetswitcher(refinement_machine,targetdir,targets,substitutions):

    if refinement_machine:
        name = "TargetSwitcher-rm"
        print "Generating TargetSwitcher-rm: %s" % (name)
        type_param = "rm"
    else:
        name="TargetSwitcher"
        print "Generating TargetSwitcher: %s" % (name)
        type_param = "am"

    handle = StringIO()
    generatetaskswitcher.main(targetdir,type_param,None,targets,handle)
    handle.flush()
    handle.seek(0)
    taskswitcher = handle.readlines()
    handle.close()

    outputfile=os.path.join(targetdir,"%s.lsts" % name)
    with open(outputfile,'w') as handle:
        for line in taskswitcher:
            for subs in substitutions:
                line = subs[0].sub(subs[1],line)
            handle.write(line)

    return name
Example #19
def cleanup_css(css_input, minified=True):
    """Cleanup CSS code delivered in `css_input`, a string.

    Returns 2-item tuple ``(<CSS>, <ERRORS>)`` where ``<CSS>`` is the
    cleaned and minimized CSS code and ``<ERRORS>`` is a multiline
    string containing warnings and errors that occurred while
    processing the CSS.

    By default the ``<CSS>`` returned is minified to reduce network
    load, etc. If you want pretty non-minified output, set `minified`
    to ``False``.

    We expect and return texts, not bytestreams.
    """
    # Set up a local logger for warnings and errors
    local_log = StringIO()
    handler = logging.StreamHandler(local_log)
    handler.setFormatter(logging.Formatter('%(levelname)s %(message)s'))
    handler.propagate = False
    handler.setLevel(logging.WARNING)
    logger = logging.getLogger()
    logger.addHandler(handler)

    cssutils.log.setLog(logger)
    cssutils.ser.prefs.useDefaults()
    if minified is True:
        cssutils.ser.prefs.useMinified()

    sheet = cssutils.parseString(css_input)

    local_log.flush()
    encoding = sheet.encoding or 'utf-8'
    css_text = sheet.cssText.decode(encoding)
    return css_text, local_log.getvalue()
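A quick usage sketch for the helper above (assuming cssutils is installed):

css, errors = cleanup_css('a { color: red; }')
print css      # minified rules, e.g. 'a{color:red}'
print errors   # any cssutils warnings, one per line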
Example #20
    def __repairFile(self, file):
        '''
        Repair the JSON file if necessary

        If the JSON file is not closed properly, perhaps due a system
        crash during a test run, then the JSON is repaired by
        discarding the trailing, incomplete item and appending braces
        to the file to close the JSON object.

        The repair is performed on a string buffer, and the given file
        is never written to. This allows the file to be safely read
        during a test run.

        :return: If no repair occurred, then ``file`` is returned.
                Otherwise, a new file object containing the repaired JSON
                is returned.
        '''

        file.seek(0)
        lines = file.readlines()

        # JSON object was not closed properly.
        #
        # To repair the file, we execute these steps:
        #   1. Find the closing brace of the last, properly written
        #      test result.
        #   2. Discard all subsequent lines.
        #   3. Remove the trailing comma of that test result.
        #   4. Append enough closing braces to close the json object.
        #   5. Return a file object containing the repaired JSON.

        # Each non-terminal test result ends with this line:
        safe_line =  3 * JSONWriter.INDENT * ' ' + '},\n'

        # Search for the last occurrence of safe_line.
        safe_line_num = None
        for i in range(-1, - len(lines), -1):
            if lines[i] == safe_line:
                safe_line_num = i
                break

        if safe_line_num is None:
            raise Exception('failed to repair corrupt result file: ' + file.name)

        # Remove corrupt lines.
        lines = lines[0:(safe_line_num + 1)]

        # Remove trailing comma.
        lines[-1] = 3 * JSONWriter.INDENT * ' ' + '}\n'

        # Close json object.
        lines.append(JSONWriter.INDENT * ' ' + '}\n')
        lines.append('}')

        # Return new file object containing the repaired JSON.
        new_file = StringIO()
        new_file.writelines(lines)
        new_file.flush()
        new_file.seek(0)
        return new_file
Example #21
class TestPrefilterFrontEnd(PrefilterFrontEnd):
    
    input_prompt_template = string.Template('')
    output_prompt_template = string.Template('')
    banner = ''

    def __init__(self):
        ipython0 = get_ipython0().IP
        self.out = StringIO()
        PrefilterFrontEnd.__init__(self, ipython0=ipython0)
        # Clean up the namespace for isolation between tests
        user_ns = self.ipython0.user_ns
        # We need to keep references to things so that they don't
        # get garbage collected (this stinks).
        self.shadow_ns = dict()
        for i in self.ipython0.magic_who_ls():
            self.shadow_ns[i] = user_ns.pop(i)
        # Some more code for isolation (yeah, crazy)
        self._on_enter()
        self.out.flush()
        self.out.reset()
        self.out.truncate()

    def write(self, string, *args, **kwargs):
        self.out.write(string)

    def _on_enter(self):
        self.input_buffer += '\n'
        PrefilterFrontEnd._on_enter(self)
Example #22
    def create_gon_file(self):

        output = """        {
            "type": "Goniometer", 
            "properties": {
                "radius": %(radius)f, 
                "divergence": %(divergence)f, 
                "soller1": %(soller1)f, 
                "soller2": %(soller2)f, 
                "min_2theta": %(twotheta_min)f, 
                "max_2theta": %(twotheta_max)f, 
                "steps": %(twotheta_count)f, 
                "wavelength": %(alpha_average)f, 
                "has_ads": false, 
                "ads_fact": 1.0, 
                "ads_phase_fact": 1.0, 
                "ads_phase_shift": 0.0, 
                "ads_const": 0.0
            }
        }""" % dict(
            radius=float(not_none(self.radius, 25)),
            divergence=float(not_none(self.divergence, 0.5)),
            soller1=float(not_none(self.soller1, 2.5)),
            soller2=float(not_none(self.soller2, 2.5)),
            twotheta_min=float(not_none(self.twotheta_min, 3.0)),
            twotheta_max=float(not_none(self.twotheta_max, 45.0)),
            twotheta_count=float(not_none(self.twotheta_count, 2500)),
            alpha_average=float(not_none(self.alpha_average, 0.154056)),
        )
        f = StringIO(output)
        f.flush()
        return f
Example #23
def bcompile(source):
    """Return the compiled bytecode from the given filename as a string ."""
    f = open(source, 'U')
    try:
        try:
            timestamp = long(os.fstat(f.fileno()).st_mtime)
        except AttributeError:
            timestamp = long(os.stat(source).st_mtime)
        codestring = f.read()
        f.close()
        if codestring and codestring[-1] != '\n':
            codestring = codestring + '\n'
        try:
            codeobject = __builtin__.compile(codestring, source, 'exec')
        except Exception,err:
            raise PyCompileError(err.__class__, err.args, source)
        fc = StringIO()
        try:
            fc.write('\0\0\0\0')
            wr_long(fc, timestamp)
            fc.write(marshal.dumps(codeobject))
            fc.flush()
            fc.seek(0, 0)
            fc.write(MAGIC)
            return fc.getvalue()
        finally:
            fc.close()
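A usage sketch, with a hypothetical source file; the returned byte string is what a .pyc file for that source would contain:

bytecode = bcompile('mymodule.py')      # hypothetical source file
with open('mymodule.pyc', 'wb') as fh:
    fh.write(bytecode)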
Example #24
class StringOutputStream(object):

    def __init__(self):

        self.output = StringIO()

    def write(self, data):

        self.output.write(data)

    def flush(self):

        self.output.flush()

    def close(self):

        self.output.flush()
        self.string = self.output.getvalue()
        self.output.close()

    def getString(self):

        string = getattr(self, 'string', None)
        if string is None:
            raise ValueError, 'StringOutputStream is still open'

        return string
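Usage sketch: the buffered value only becomes available through getString() after close():

stream = StringOutputStream()
stream.write('hello ')
stream.write('world')
stream.close()
print stream.getString()   # 'hello world'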
Example #25
File: static.py Project: sklam/mlvm
    def __str__(self):
        sio = StringIO()
        self.write(sio)
        sio.flush()
        value = sio.getvalue()  # avoid shadowing the builtin str
        sio.close()
        return value
Example #26
    def get_compressed_file(self):
        in_memory_zip = StringIO()
        zf = zipfile.ZipFile(in_memory_zip, "w", zipfile.ZIP_DEFLATED)
        zf.writestr(self.get_filename(), self.get_content().getvalue())
        zf.close()
        in_memory_zip.flush()
        return in_memory_zip
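Note that the method returns the buffer without rewinding it, so a caller streaming the zip (rather than calling getvalue()) must seek first. A sketch, assuming an instance of the enclosing class:

zipped = obj.get_compressed_file()
zipped.seek(0)          # rewind before streaming the zip out
data = zipped.read()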
Example #27
def cleanup_css(css_input, minified=True):
    """Cleanup CSS code delivered in `css_input`, a string.

    Returns 2-item tuple ``(<CSS>, <ERRORS>)`` where ``<CSS>`` is the
    cleaned and minimized CSS code and ``<ERRORS>`` is a multiline
    string containing warnings and errors that occurred while
    processing the CSS.

    By default the ``<CSS>`` returned is minified to reduce network
    load, etc. If you want pretty non-minified output, set `minified`
    to ``False``.

    We expect and return texts, not bytestreams.
    """
    # Set up a local logger for warnings and errors
    local_log = StringIO()
    handler = logging.StreamHandler(local_log)
    handler.setFormatter(logging.Formatter('%(levelname)s %(message)s'))
    handler.propagate = False
    handler.setLevel(logging.WARNING)
    logger = logging.getLogger()
    logger.addHandler(handler)

    cssutils.log.setLog(logger)
    cssutils.ser.prefs.useDefaults()
    if minified is True:
        cssutils.ser.prefs.useMinified()

    sheet = cssutils.parseString(css_input)

    local_log.flush()
    encoding = sheet.encoding or 'utf-8'
    css_text = sheet.cssText.decode(encoding)
    return css_text, local_log.getvalue()
Example #28
    def get_compressed_file(self):
        in_memory_zip = StringIO()
        zf = zipfile.ZipFile(in_memory_zip, "w", zipfile.ZIP_DEFLATED)
        zf.writestr(self.get_filename(), self.get_content().getvalue())
        zf.close()
        in_memory_zip.flush()
        return in_memory_zip
Example #29
def _make_csv_report(q, display_headers, raw_headers, pagetitle, filename):
    def make_record(itm, hdr):
        if isinstance(itm, dict):
            return tuple(itm[h] for h in hdr)
        return tuple(getattr(itm, h) for h in hdr)

    # If a direct csv file is requested
    if request.args.get('csvfile', None) == '1':
        io = StringIO()
        writer = csv.writer(io)
        writer.writerow(raw_headers)
        for itm in q:
            writer.writerow(make_record(itm, raw_headers))
        io.flush()
        io.seek(0, 0)   # important! we want send_file to read from the front
        return send_file(
            io,
            as_attachment=True,
            attachment_filename='%s.csv' % filename
        )

    # Otherwise show the page.
    return render_template(
        'admin.csvdata.html',
        headers=display_headers,
        items=[make_record(itm, raw_headers) for itm in q],
        pagetitle=pagetitle,
    )
Example #30
class ClientProtocol(Protocol):
    def __init__(self):
        self.rec = StringIO()

    def connectionMade(self):
        self.factory.run(self)

    def connectionLost(self, reason):
        self.factory.stop()

    def dataReceived(self, data):
        self.rec.write(data)
        self.rec.flush()
        self.rec = StringIO(self.rec.getvalue())
        pickling = True
        while pickling:
            try:
                result = pickle.load(self.rec)
            except (pickle.UnpicklingError, ValueError):
                pickling = False
            except EOFError:
                self.rec.close()
                self.rec = StringIO()
                pickling = False
            except Exception as e:
                self.factory.fail(e)
                pickling = False
            else:
                self.factory.notify(result)

    def send(self, obj):
        self.transport.write(pickle.dumps(obj))
Example #31
def insert_into_file(fileobj, data, start, end):
    """
    Insert data into fileobj at position C{start}.

    This function inserts data into a file, overwriting all data between start
    and end. If end == start no data is overwritten. Do not use this function to
    append data to a file.

    @param fileobj: file like object
    @param data:    data to be inserted into fileobj
    @param start:   The position at which to start inserting data
    @param end:     The position in fileobj of data that must not be overwritten
    @return:        C{start + len(data) - end}
    """
    buffer = StringIO()
    fileobj.seek(end)
    copyfileobj(fileobj, buffer, -1)
    buffer.flush()
    buffer.seek(0)
    fileobj.seek(start)
    fileobj.write(data)
    fileobj.flush()
    fileobj.truncate()
    delta = fileobj.tell() - end  # < 0 if len(data) < end-start
    copyfileobj(buffer, fileobj, -1)
    fileobj.flush()
    buffer.close()
    return delta
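A minimal Python 2 sketch of calling the helper on an in-memory file:

from StringIO import StringIO

f = StringIO('AAABBBCCC')
delta = insert_into_file(f, 'xx', 3, 6)  # replaces 'BBB' with 'xx'
assert f.getvalue() == 'AAAxxCCC'
assert delta == -1                       # len('xx') - (end - start)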
Example #32
File: meta.py Project: Farb/calibre
def insert_into_file(fileobj, data, start, end):
    """
    Insert data into fileobj at position C{start}.

    This function inserts data into a file, overwriting all data between start
    and end. If end == start no data is overwritten. Do not use this function to
    append data to a file.

    @param fileobj: file like object
    @param data:    data to be inserted into fileobj
    @param start:   The position at which to start inserting data
    @param end:     The position in fileobj of data that must not be overwritten
    @return:        C{start + len(data) - end}
    """
    buffer = StringIO()
    fileobj.seek(end)
    copyfileobj(fileobj, buffer, -1)
    buffer.flush()
    buffer.seek(0)
    fileobj.seek(start)
    fileobj.write(data)
    fileobj.flush()
    fileobj.truncate()
    delta = fileobj.tell() - end  # < 0 if len(data) < end-start
    copyfileobj(buffer, fileobj, -1)
    fileobj.flush()
    buffer.close()
    return delta
Example #33
    def get(self, request, format=None):
        server_keys = ServerKeys.objects.all()
        if not server_keys:
            return HttpResponse("Server is not created",
                                status=status.HTTP_204_NO_CONTENT)
        keys = OpenVPNKeys.objects.filter(user=request.user)
        if not keys:
            return HttpResponse("You have no keys",
                                status=status.HTTP_204_NO_CONTENT)
        # lumberjack style checking permission
        elif keys[0].user != request.user:
            return HttpResponse(
                "You do not have permission to perform this action.",
                status=status.HTTP_403_FORBIDDEN)
        config = OpenVPNConfig.objects.filter(deployed=True)
        if not config:
            return HttpResponse(
                "There is no config file deployed on server side",
                status=status.HTTP_204_NO_CONTENT)
        ca = server_keys[0].decode('public_ca')
        key = keys[0].decode('private_key')
        cert = keys[0].decode('public_key')
        key_part = ConfigFunctions().create_user_keys(ca=ca, key=key, cert=cert)
        client_config = config[0].create_client_config(keys=key_part)
        file = StringIO()
        file.write(client_config)
        file.flush()
        file.seek(0)
        response = HttpResponse(FileWrapper(file))
        response['Content-Disposition'] = 'attachment; filename=client.conf'
        return response
Example #34
def wikify(s):
    output = StringIO()
    parser = Wikify(write=output.write)
    parser.feed(s)  # @@ except?
    output.flush()
    output.seek(0)
    return output.read()
Example #35
    def test_write_empy_seq(self):
        'It does not write an empty sequence'
        seq1 = SeqRecord(Seq('ACTG'), id='seq1')
        fhand = StringIO()
        _write_seqrecords([seq1, None, SeqRecord(Seq(''), id='seq2')], fhand,
                          file_format='fasta')
        fhand.flush()
        assert fhand.getvalue() == '>seq1\nACTG\n'
Example #36
def to_mem_excel(dataframe, sheet_name='WorkSheet'):
    iobuffer = BytesIO()
    writer = ExcelWriter(iobuffer, engine='xlwt')
    dataframe.to_excel(writer, sheet_name=sheet_name)
    writer.save()
    iobuffer.flush()
    iobuffer.seek(0)
    return iobuffer.getvalue()
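Usage sketch, assuming an old pandas with the xlwt engine (which writes legacy .xls workbooks):

import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
xls_bytes = to_mem_excel(df, sheet_name='Numbers')
with open('numbers.xls', 'wb') as fh:    # or attach to an HTTP response
    fh.write(xls_bytes)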
Example #38
    def test_parse_relay_file(self):
        fp = StringIO()
        fp.write("websocket 0.0.1.0:1\n")
        fp.flush()
        fp.seek(0)
        af = socket.AF_INET
        servers = {af: Endpoints(af=af)}
        parse_relay_file(servers, fp)
        self.assertEquals(servers[af]._endpoints,
                          {('0.0.1.0', 1): Transport('', 'websocket')})
Example #39
    def get(self, request, graphid, nodeid=None):
        if self.action == 'export_graph':
            graph = get_graphs_for_export([graphid])
            graph['metadata'] = system_metadata()
            f = JSONSerializer().serialize(graph, indent=4)
            graph_name = JSONDeserializer().deserialize(f)['graph'][0]['name']

            response = HttpResponse(f, content_type='json/plain')
            response['Content-Disposition'] = 'attachment; filename="%s.json"' % (graph_name)
            return response
        elif self.action == 'export_mapping_file':
            files_for_export = create_mapping_configuration_file(graphid, True)
            file_name = Graph.objects.get(graphid=graphid).name

            buffer = StringIO()

            with zipfile.ZipFile(buffer, 'w', zipfile.ZIP_DEFLATED) as zip:
                for f in files_for_export:
                    f['outputfile'].seek(0)
                    zip.writestr(f['name'], f['outputfile'].read())

            buffer.flush()
            zip_stream = buffer.getvalue()
            buffer.close()

            response = HttpResponse()
            response['Content-Disposition'] = 'attachment; filename=' + file_name + '.zip'
            response['Content-length'] = str(len(zip_stream))
            response['Content-Type'] = 'application/zip'
            response.write(zip_stream)
            return response

        elif self.action == 'get_domain_connections':
            res = []
            graph = Graph.objects.get(graphid=graphid)
            ontology_class = request.GET.get('ontology_class', None)
            ret = graph.get_valid_domain_ontology_classes()
            for r in ret:
                res.append({'ontology_property': r['ontology_property'], 'ontology_classes': [
                           c for c in r['ontology_classes']]})
            return JSONResponse(res)

        else:
            graph = Graph.objects.get(graphid=graphid)
            if self.action == 'get_related_nodes':
                parent_nodeid = request.GET.get('parent_nodeid', None)
                ret = graph.get_valid_ontology_classes(nodeid=nodeid, parent_nodeid=parent_nodeid)

            elif self.action == 'get_valid_domain_nodes':
                if nodeid == '':
                    nodeid = None
                ret = graph.get_valid_domain_ontology_classes(nodeid=nodeid)

            return JSONResponse(ret)

        return HttpResponseNotFound()
Example #40
def export(request, rhp_id):
    try:
        rhp = Rhp.objects.get(pk=rhp_id)
    except Rhp.DoesNotExist:
        raise Http404

    # create the jinja2 environment for latex response
    # loader = FileSystemLoader('/path/to/templates')

    loader = PackageLoader('rhp', 'templates/latex')
    latex_helper = LatexHelper(loader)

    context = {
        'rhp': rhp,
        'vlu': rhp.vlu,
        'fragen': Frage.objects.select_related(),
        'fragensets': Fragenset.objects.select_related(),
        'optionen': Option.objects.select_related(),
        'vorlesungen': rhp.vlu.vorlesungen.select_related(),
        'artikel': rhp.artikel.all(),
        }

    files = []
    tmpfiles = []
    for tpl in latex_helper.env.list_templates():
        if tpl and (tpl.find('.tex') > 0 or tpl.find('.sty') > 0):
            template = latex_helper.env.get_template(tpl)
            f = tempfile.NamedTemporaryFile()
            f.write(template.render(context).encode('utf8'))
            f.flush()
            tmpfiles.append((tpl, f))
        else:
            files.append((tpl, loader.get_source(latex_helper.env,
                         tpl)[1]))

    # return as a zip file. from here: https://code.djangoproject.com/wiki/CookBookDynamicZip

    response = HttpResponse(mimetype='application/zip')
    response['Content-Disposition'] = 'filename=' + rhp.name + '.zip'

    buffer = StringIO()
    zip = zipfile.ZipFile(buffer, 'w', zipfile.ZIP_DEFLATED)

    for (name, f) in tmpfiles:
        zip.write(f.name, rhp.name + '/' + name)
        f.close()

    for (name, f) in files:
        zip.write(f, rhp.name + '/' + name)

    zip.close()
    buffer.flush()

    response.write(buffer.getvalue())
    buffer.close()
    return response
Example #41
def decompress_tex(tex_texture_buffer):
    tBuffer = StringIO()
    tBuffer.write(tex_texture_buffer)
    tBuffer.seek(0)
    magic = tBuffer.read(4)
    if magic == '\x54\x45\x58\x20':  # 'TEX '
        width, height = struct.unpack('2H', tBuffer.read(4))
        ver = ord(tBuffer.read(1))
        color_mode = ord(tBuffer.read(1))
        unk = struct.unpack('H', tBuffer.read(2))[0]
        tBuffer.seek(8, 1)
        CompressedSize = struct.unpack('I', tBuffer.read(4))[0]
        UncompressedSize = struct.unpack('I', tBuffer.read(4))[0]
        Crc32 = struct.unpack('I', tBuffer.read(4))[0]
        zdata = tBuffer.read(CompressedSize)
        dec_data = decompress_deflate(zdata)
    tBuffer.flush()
    if color_mode == 0xc:
        # Build a PVRTC4 header for the texture
        pBuffer = StringIO()
        pBuffer.write('\x00' * 0x34)
        pBuffer.write('\x00' * (width * height / 2))
        pBuffer.seek(0)
        pBuffer.write('\x34\x00\x00\x00')
        pBuffer.write(struct.pack('I', width))
        pBuffer.write(struct.pack('I', height))
        pBuffer.seek(4, 1)
        pBuffer.write(struct.pack('I', 0x8019))
        pBuffer.write(struct.pack('I', width * height / 2))
        pBuffer.write(struct.pack('I', 4))
        pBuffer.seek(0xc, 1)
        pBuffer.write(struct.pack('I', 1))
        pBuffer.write('PVR!')
        pBuffer.write(struct.pack('I', 1))
        pBuffer.write(dec_data)
    if color_mode == 0x2:
        # Build a 32BPP header for the texture
        pBuffer = StringIO()
        pBuffer.write('\x00' * 0x34)
        pBuffer.write('\x00' * (width * height * 4))
        pBuffer.seek(0)
        pBuffer.write('\x34\x00\x00\x00')
        pBuffer.write(struct.pack('I', width))
        pBuffer.write(struct.pack('I', height))
        pBuffer.seek(4, 1)
        pBuffer.write(struct.pack('I', 0x8012))
        pBuffer.write(struct.pack('I', width * height * 4))
        pBuffer.write(struct.pack('I', 0x20))
        pBuffer.write(struct.pack('I', 0xff))
        pBuffer.write(struct.pack('I', 0xff00))
        pBuffer.write(struct.pack('I', 0xff0000))
        pBuffer.write(struct.pack('I', 0xff000000))
        pBuffer.write('PVR!')
        pBuffer.write(struct.pack('I', 1))
        pBuffer.write(dec_data)
    return pBuffer.getvalue()
Example #42
def primer_download(request, cid):
    con = get_construct(request.user, cid)
    if con and con.fragments.all().count():
        print request.GET['tk']
        #set up response headers
        response = HttpResponse(mimetype='application/zip')
        response[
            'Content-Disposition'] = 'attachment; filename=' + con.name + '.zip'
        response.set_cookie('fileDownloadToken', request.GET['tk'])

        # get all the pcr instruction files
        pcr = [(con.name + '-' + cf.fragment.name + '.pcr', pcr_cycle(cf))
               for cf in con.cf.all()]

        # write the csv file
        csvbuffer = StringIO()
        writer = csv.writer(csvbuffer)
        writer.writerow(['Name', 'Length', 'Melting Temperature', 'Sequence'])
        for p in con.primer.all():
            writer.writerow(p.csv())
        csvbuffer.flush()

        # write the pdf
        t = loader.get_template('gibson/pdf_primer.html')
        c = RequestContext(request, {
            'construct': con,
            'each': 5.0 / con.fragments.all().count()
        })
        pdfbuffer = StringIO()
        pdf = pisa.CreatePDF(StringIO(t.render(c).encode("ISO-8859-1")),
                             pdfbuffer,
                             link_callback=fetch_resources)

        # write the zip file
        zipbuffer = StringIO()
        zip = zipfile.ZipFile(zipbuffer, 'w', zipfile.ZIP_DEFLATED)
        # add the pcr files
        for name, f in pcr:
            zip.writestr(con.name + '/pcr/' + name, f)
        # add the csv file
        zip.writestr(con.name + '/primers.csv', csvbuffer.getvalue())
        # add the pdf
        zip.writestr(con.name + '/' + con.name + '.pdf', pdfbuffer.getvalue())
        # add the gb
        zip.writestr(con.name + '/' + con.name + '.gb', con.gb())
        # closing of buffers and return
        csvbuffer.close()
        pdfbuffer.close()
        zip.close()
        zipbuffer.flush()
        ret_zip = zipbuffer.getvalue()
        zipbuffer.close()
        response.write(ret_zip)
        return response
    else:
        return HttpResponseNotFound()
Example #43
def make_csv(primers):
    csvbuffer = StringIO()
    writer = csv.writer(csvbuffer)
    writer.writerow(['Name', 'Length', 'Melting Temperature', 'Sequence'])
    for p in primers:
        writer.writerow(p.csv())
    csvbuffer.flush()
    retval = csvbuffer.getvalue()
    csvbuffer.close()
    return retval
Example #44
    def test_write_empy_seq(self):
        'It does not write an empty sequence'
        seq1 = SeqRecord(Seq('ACTG'), id='seq1')
        fhand = StringIO()
        _write_seqrecords(
            [seq1, None, SeqRecord(Seq(''), id='seq2')],
            fhand,
            file_format='fasta')
        fhand.flush()
        assert fhand.getvalue() == '>seq1\nACTG\n'
Example #45
def zip_csv(filename, data):

    with ZipFile(filename, 'w') as zipfile:
        for _i, points in enumerate(data):
            string_buffer = StringIO()
            writer = csv.writer(string_buffer)
            writer.writerows(points.tolist())
            string_buffer.flush()
            zipfile.writestr("{0:05d}.csv".format(_i + 1),
                             string_buffer.getvalue())
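Usage sketch; the code calls .tolist() on each element, so NumPy arrays (or anything with a tolist()) are assumed:

import numpy as np

points = [np.random.rand(10, 3) for _ in range(5)]
zip_csv('points.zip', points)   # writes 00001.csv ... 00005.csv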
Example #46
def dumps( object):
    """Return a zipped pickle of the object as a string"""
    fileobj = StringIO()
    try:
        dump( object, fileobj )
        fileobj.flush()
        value = fileobj.getvalue()
    finally:
        fileobj.close()
    return value
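Usage sketch, assuming the module pairs this with a matching inverse for the zipped pickle:

blob = dumps({'a': 1})   # zipped pickle, as a plain byte string
obj = loads(blob)        # hypothetical matching inverse helper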
Example #47
def make_csv(primers):
    csvbuffer = StringIO()
    writer = csv.writer(csvbuffer)
    writer.writerow(['Name', 'Length', 'Melting Temperature', 'Sequence'])
    for p in primers:
        writer.writerow(p.csv())
    csvbuffer.flush()
    retval = csvbuffer.getvalue()
    csvbuffer.close()
    return retval
Example #48
    def test_parse_relay_file(self):
        fp = StringIO()
        fp.write("websocket 0.0.1.0:1\n")
        fp.flush()
        fp.seek(0)
        af = socket.AF_INET
        servers = {af: Endpoints(af=af)}
        parse_relay_file(servers, fp)
        self.assertEquals(servers[af]._endpoints,
                          {('0.0.1.0', 1): Transport('', 'websocket')})
Example #49
def dumps( object):
    """Return a zipped pickle of the object as a string"""
    fileobj = StringIO()
    try:
        dump( object, fileobj )
        fileobj.flush()
        value = fileobj.getvalue()
    finally:
        fileobj.close()
    return value
Example #50
def wikiParse(s, getlinks=False):
    output = StringIO()
    parse = TextParser(write=output.write,
                       exists=lambda wn: os.path.exists(wn + '.html'))
    parse(s)
    output.flush()
    output.seek(0)
    if getlinks:
        return output.read(), parse.rawlinks
    return output.read()
Example #51
def wikiParse(s, getlinks=False):
    output = StringIO()
    parse = TextParser(write=output.write,
                       exists=lambda wn: os.path.exists(wn + '.html'))
    parse(s)
    output.flush()
    output.seek(0)
    if getlinks:
        return output.read(), parse.rawlinks
    return output.read()
Example #52
def export(request, rhp_id):
    try:
        rhp = Rhp.objects.get(pk=rhp_id)
    except Rhp.DoesNotExist:
        raise Http404

    # create the jinja2 environment for latex response
    # loader = FileSystemLoader('/path/to/templates')

    loader = PackageLoader('rhp', 'templates/latex')
    latex_helper = LatexHelper(loader)

    context = {
        'rhp': rhp,
        'vlu': rhp.vlu,
        'fragen': Frage.objects.select_related(),
        'fragensets': Fragenset.objects.select_related(),
        'optionen': Option.objects.select_related(),
        'vorlesungen': rhp.vlu.vorlesungen.select_related(),
        'artikel': rhp.artikel.all(),
    }

    files = []
    tmpfiles = []
    for tpl in latex_helper.env.list_templates():
        if tpl and (tpl.find('.tex') > 0 or tpl.find('.sty') > 0):
            template = latex_helper.env.get_template(tpl)
            f = tempfile.NamedTemporaryFile()
            f.write(template.render(context).encode('utf8'))
            f.flush()
            tmpfiles.append((tpl, f))
        else:
            files.append((tpl, loader.get_source(latex_helper.env, tpl)[1]))

    # return as a zip file. from here: https://code.djangoproject.com/wiki/CookBookDynamicZip

    response = HttpResponse(mimetype='application/zip')
    response['Content-Disposition'] = 'filename=' + rhp.name + '.zip'

    buffer = StringIO()
    zip = zipfile.ZipFile(buffer, 'w', zipfile.ZIP_DEFLATED)

    for (name, f) in tmpfiles:
        zip.write(f.name, rhp.name + '/' + name)
        f.close()

    for (name, f) in files:
        zip.write(f, rhp.name + '/' + name)

    zip.close()
    buffer.flush()

    response.write(buffer.getvalue())
    buffer.close()
    return response
Example #53
    def run_step(self,
                 step_dict,
                 input_file,
                 outfile_name,
                 step_number,
                 step_type,
                 env,
                 child_stdin=None):
        common_args = (['--step-num=%d' % step_number] +
                       self._mr_job_extra_args(local=True))

        if step_type == 'mapper':
            child_args = (['--mapper'] + [input_file] + common_args)
        elif step_type == 'reducer':
            child_args = (['--reducer'] + [input_file] + common_args)
        elif step_type == 'combiner':
            child_args = ['--combiner'] + common_args + ['-']

        child_instance = self._mrjob_cls(args=child_args)

        has_combiner = (step_type == 'mapper' and 'combiner' in step_dict)

        # Use custom stdin
        if has_combiner:
            child_stdout = StringIO()
        else:
            child_stdout = open(outfile_name, 'w')

        with save_current_environment():
            os.environ.update(env)
            child_instance.sandbox(stdin=child_stdin, stdout=child_stdout)
            child_instance.execute()

        if has_combiner:
            sorted_lines = sorted(child_stdout.getvalue().splitlines())
            combiner_stdin = StringIO('\n'.join(sorted_lines))
        else:
            child_stdout.flush()

        child_stdout.close()

        while len(self._counters) <= step_number:
            self._counters.append({})
        child_instance.parse_counters(self._counters[step_number - 1])

        if has_combiner:
            self.run_step(step_dict,
                          "",
                          outfile_name,
                          step_number,
                          'combiner',
                          env=env,
                          child_stdin=combiner_stdin)

            combiner_stdin.close()
Example #54
def export_raw_xml(request):
    if request.method == 'GET':
        #info = LocalizationInfo.get_lastest_info()
        appversion = request.GET.get("appversion")
        if not appversion:
            return json_response_error(
                PARAM_REQUIRED, msg="parameter 'appversion' invalid")
        info = LocalizationInfo.find(
            {"appversion": appversion, "locale": ""}, one=True)
        appname = info['appname']
        locales = LocalizationConfig.get_app_locales(
            appname, appversion, locale_only=True)
        ret_sio = StringIO()
        ret_zip = zipfile.ZipFile(ret_sio, 'w')

        for l in locales:
            missing_xml_dirpath = os.path.join(
                STATIC_ROOT, appname, appversion, 'miss_xml_data',
                'values' if l == '' else "values-%s" % l)
            l_strings = LocalizationTask.organize_strings(
                appname, appversion, l, as_raw=True)

            if os.path.exists(missing_xml_dirpath):
                files = os.listdir(missing_xml_dirpath)
                for f in files:
                    if f not in l_strings:
                        sio = StringIO()
                        sio.write(
                            file(os.path.join(missing_xml_dirpath, f)).read())
                        sio.flush()
                        missing_filename = "%s_missing%s" % tuple(
                            os.path.splitext(f))
                        ret_zip.writestr(
                            os.path.join(
                                "values-%s" % l if l else 'values',
                                missing_filename),
                            sio.getvalue())

            for k, v in l_strings.iteritems():
                missing_filename = "%s_missing%s" % tuple(os.path.splitext(k))
                # to adapt to standard of client side
                filepath = os.path.join(
                    "values-%s" % l if l else 'values', missing_filename)
                sio = StringIO()
                v.write(sio, encoding='utf-8', xml_declaration=True)
                ret_zip.writestr(filepath, sio.getvalue())

        ret_zip.close()
        response = HttpResponse(ret_sio.getvalue(), mimetype="application/zip")
        response['Content-Disposition'] = "attachment; "\
            "filename=raw_xml_data_%s_%s.zip" % (appname, appversion)
        return response
    else:
        return json_response_error(METHOD_ERROR, msg="http method wrong")
Example #55
def write_ini(x, ofile):
    out = StringIO()
    for topic in sorted(x):
        out.write('[' + str(topic) + ']\n')
        for fld in sorted(x[topic]):
            out.write(
                str(fld) + '=' + ','.join(map(str, x[topic][fld])) + '\n')
    out.flush()
    with open(ofile, 'w') as fid:
        fid.write(out.getvalue())
    out.close()
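Usage sketch for the writer above:

write_ini({'server': {'hosts': ['a', 'b'], 'port': [8080]}}, 'out.ini')
# out.ini now contains:
# [server]
# hosts=a,b
# port=8080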
Example #56
    def get(self, request, graphid, nodeid=None):
        if self.action == 'export_graph':
            graph = get_graphs_for_export([graphid])
            graph['metadata'] = system_metadata()
            f = JSONSerializer().serialize(graph, indent=4)
            graph_name = JSONDeserializer().deserialize(f)['graph'][0]['name']

            response = HttpResponse(f, content_type='json/plain')
            response['Content-Disposition'] = 'attachment; filename="%s.json"' %(graph_name)
            return response
        elif self.action == 'export_mapping_file':
            files_for_export = create_mapping_configuration_file(graphid, True)
            file_name = Graph.objects.get(graphid=graphid).name

            buffer = StringIO()

            with zipfile.ZipFile(buffer, 'w', zipfile.ZIP_DEFLATED) as zip:
                for f in files_for_export:
                    f['outputfile'].seek(0)
                    zip.writestr(f['name'], f['outputfile'].read())

            buffer.flush()
            zip_stream = buffer.getvalue()
            buffer.close()

            response = HttpResponse()
            response['Content-Disposition'] = 'attachment; filename=' + file_name + '.zip'
            response['Content-length'] = str(len(zip_stream))
            response['Content-Type'] = 'application/zip'
            response.write(zip_stream)
            return response

        elif self.action == 'get_domain_connections':
            res = []
            graph = Graph.objects.get(graphid=graphid)
            ontology_class = request.GET.get('ontology_class', None)
            ret = graph.get_valid_domain_ontology_classes()
            for r in ret:
                res.append({'ontology_property': r['ontology_property'], 'ontology_classes':[c for c in r['ontology_classes'] if c == ontology_class]})
            return JSONResponse(res)

        else:
            graph = Graph.objects.get(graphid=graphid)
            if self.action == 'get_related_nodes':
                parent_nodeid = request.GET.get('parent_nodeid', None)
                ret = graph.get_valid_ontology_classes(nodeid=nodeid, parent_nodeid=parent_nodeid)

            elif self.action == 'get_valid_domain_nodes':
                ret = graph.get_valid_domain_ontology_classes(nodeid=nodeid)

            return JSONResponse(ret)

        return HttpResponseNotFound()
Example #57
    def _run_step(self, step_num, step_type, input_path, output_path,
                  working_dir, env, child_stdin=None):
        step = self._get_step(step_num)

        # Passing local=False ensures the job uses proper names for file
        # options (see issue #851 on github)
        common_args = (['--step-num=%d' % step_num] +
                       self._mr_job_extra_args(local=False))

        if step_type == 'mapper':
            child_args = (
                ['--mapper'] + [input_path] + common_args)
        elif step_type == 'reducer':
            child_args = (
                ['--reducer'] + [input_path] + common_args)
        elif step_type == 'combiner':
            child_args = ['--combiner'] + common_args + ['-']

        child_instance = self._mrjob_cls(args=child_args)

        has_combiner = (step_type == 'mapper' and 'combiner' in step)

        # Use custom stdin
        if has_combiner:
            child_stdout = StringIO()
        else:
            child_stdout = open(output_path, 'w')

        with save_current_environment():
            with save_cwd():
                os.environ.update(env)
                os.chdir(working_dir)

                child_instance.sandbox(stdin=child_stdin, stdout=child_stdout)
                child_instance.execute()

        if has_combiner:
            sorted_lines = sorted(child_stdout.getvalue().splitlines())
            combiner_stdin = StringIO('\n'.join(sorted_lines))
        else:
            child_stdout.flush()

        child_stdout.close()

        while len(self._counters) <= step_num:
            self._counters.append({})
        parse_mr_job_stderr(child_instance.stderr.getvalue(),
                            counters=self._counters[step_num])

        if has_combiner:
            self._run_step(step_num, 'combiner', None, output_path,
                           working_dir, env, child_stdin=combiner_stdin)

            combiner_stdin.close()
Example #58
class intercept_stdout(object):
    def __init__(self):
        self.buf = StringIO()

    def __enter__(self):
        self.old_stdout = sys.stdout
        sys.stdout = self.buf
        return self.buf

    def __exit__(self, exc_type, exc_val, exc_tb):
        sys.stdout = self.old_stdout
        self.buf.flush()
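Usage sketch (Python 2, to match the print statement used elsewhere on this page):

with intercept_stdout() as buf:
    print 'captured'
print buf.getvalue()   # 'captured\n'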
Example #59
    def _invoke_inline_mrjob(self,
                             step_number,
                             step_dict,
                             outfile_name,
                             substep_to_run,
                             child_stdin=None):
        child_stdin = child_stdin or sys.stdin
        common_args = (['--step-num=%d' % step_number] +
                       self._mr_job_extra_args(local=True))

        if substep_to_run == 'mapper':
            child_args = (['--mapper'] + self._decide_input_paths() +
                          common_args)
        elif substep_to_run == 'reducer':
            child_args = (['--reducer'] + self._decide_input_paths() +
                          common_args)
        elif substep_to_run == 'combiner':
            child_args = ['--combiner'] + common_args + ['-']

        child_instance = self._mrjob_cls(args=child_args)

        has_combiner = (substep_to_run == 'mapper' and 'combiner' in step_dict)

        # Use custom stdin
        if has_combiner:
            child_stdout = StringIO()
        else:
            outfile = self._decide_output_path(outfile_name)
            child_stdout = open(outfile, 'w')

        child_instance.sandbox(stdin=child_stdin, stdout=child_stdout)
        child_instance.execute()

        if has_combiner:
            sorted_lines = sorted(child_stdout.getvalue().splitlines())
            combiner_stdin = StringIO('\n'.join(sorted_lines))
        else:
            child_stdout.flush()

        child_stdout.close()

        while len(self._counters) <= step_number:
            self._counters.append({})
        child_instance.parse_counters(self._counters[step_number - 1])
        self.print_counters([step_number + 1])

        if has_combiner:
            self._invoke_inline_mrjob(step_number,
                                      step_dict,
                                      outfile_name,
                                      'combiner',
                                      child_stdin=combiner_stdin)
            combiner_stdin.close()