Example #1
    def test_read_not_bytes(self):
        source = NamedTemporaryFile(mode="w+")
        source.write("a bunch of text")
        source.seek(0)

        with self.assertRaises(ValueError):
            self.tree._read(source)
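A note on the example above: NamedTemporaryFile defaults to binary mode ("w+b"), so the explicit mode="w+" is what allows a str to be written here. A minimal, self-contained sketch of the difference (independent of the test class above):

from tempfile import NamedTemporaryFile

# Text mode: accepts str, as in the test above.
with NamedTemporaryFile(mode="w+") as text_tmp:
    text_tmp.write("a bunch of text")
    text_tmp.seek(0)
    print(text_tmp.read())

# Default mode is "w+b": only bytes are accepted; passing str raises TypeError.
with NamedTemporaryFile() as binary_tmp:
    binary_tmp.write(b"a bunch of bytes")
    binary_tmp.seek(0)
    print(binary_tmp.read())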
Example #2
 def get_tempfile(self, **kwargs):
     kwargs.setdefault('suffix', '.vrt')
     tempfile = NamedTemporaryFile(**kwargs)
     tempfile.write(self.content)
     tempfile.flush()
     tempfile.seek(0)
     return tempfile
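The flush()/seek(0) pair is the key detail here: flush() makes the written bytes visible to code that reopens the file via tempfile.name, and seek(0) rewinds the handle for callers that read from the returned object directly. A small sketch of the same pattern with a stand-in payload (reopening by name works on POSIX; on Windows the file cannot be reopened while the handle is still open):

from tempfile import NamedTemporaryFile

tmp = NamedTemporaryFile(suffix='.vrt')
tmp.write(b'<VRTDataset/>')        # stand-in content
tmp.flush()                        # make the bytes visible to readers of tmp.name
tmp.seek(0)                        # rewind for callers using this handle

with open(tmp.name, 'rb') as reader:
    assert reader.read() == b'<VRTDataset/>'
tmp.close()                        # closing deletes the file (delete=True is the default)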
Example #3
    def _generate_training_files(self):
        """Returns a tuple of file objects suitable for passing to the
        RdpTrainer application controller.
        """
        tmp_dir = get_qiime_temp_dir()
        training_set = RdpTrainingSet()
        reference_seqs_file = open(self.Params['reference_sequences_fp'], 'U')
        id_to_taxonomy_file = open(self.Params['id_to_taxonomy_fp'], 'U')

        for seq_id, seq in MinimalFastaParser(reference_seqs_file):
            training_set.add_sequence(seq_id, seq)

        for line in id_to_taxonomy_file:
            seq_id, lineage_str = map(strip, line.split('\t'))
            training_set.add_lineage(seq_id, lineage_str)

        training_set.dereplicate_taxa()

        rdp_taxonomy_file = NamedTemporaryFile(
            prefix='RdpTaxonAssigner_taxonomy_', suffix='.txt', dir=tmp_dir)
        rdp_taxonomy_file.write(training_set.get_rdp_taxonomy())
        rdp_taxonomy_file.seek(0)

        rdp_training_seqs_file = NamedTemporaryFile(
            prefix='RdpTaxonAssigner_training_seqs_', suffix='.fasta',
            dir=tmp_dir)
        for rdp_id, seq in training_set.get_training_seqs():
            rdp_training_seqs_file.write('>%s\n%s\n' % (rdp_id, seq))
        rdp_training_seqs_file.seek(0)

        self._training_set = training_set

        return rdp_taxonomy_file, rdp_training_seqs_file
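This example relies on the prefix, suffix and dir keywords to produce recognizable paths in a specific directory for the external RDP trainer. A minimal sketch of those keywords, with tempfile.gettempdir() standing in for get_qiime_temp_dir():

from tempfile import NamedTemporaryFile, gettempdir

tmp = NamedTemporaryFile(prefix='RdpTaxonAssigner_taxonomy_', suffix='.txt',
                         dir=gettempdir())
print(tmp.name)   # e.g. /tmp/RdpTaxonAssigner_taxonomy_<random>.txt
tmp.close()       # the file is removed on close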
Example #4
def generate_shscript(args):

    """ Generate sh script input file"""

    header = """\
    #!/bin/bash
    #PBS -q {0}
    #PBS -l nodes={1}:ppn={2}:native
    #PBS -l walltime={3}
    #PBS -N {4}
    #PBS -o {4}.out
    #PBS -e {4}.err
    #PBS -A {5}
    #PBS -M {6}
    #PBS -m abe
    #PBS -V

    """.format(args.queue, args.nodes, args.ppn, args.walltime,
               args.run_name, args.account, args.email)
    header = dedent(header)

    print header + args.cmd 

    f = NamedTemporaryFile(delete=False)
    f.write(header + args.cmd)
    f.seek(0)

    # for i in f:
    #     print i.strip()
    return f.name
Example #5
    def create_temp_file(self, edid_binary):
        edid_file = NamedTemporaryFile(delete=False)
        edid_file.write(edid_binary)
        edid_file.flush()
        edid_file.seek(0)

        return edid_file
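Because this helper passes delete=False, the file survives close() and the caller is responsible for removing it. A minimal sketch of that contract (the bytes written are just a placeholder):

import os
from tempfile import NamedTemporaryFile

edid_file = NamedTemporaryFile(delete=False)
edid_file.write(b'\x00\xff\xff\xff\xff\xff\xff\x00')   # placeholder payload
edid_file.flush()
edid_file.close()                  # file still exists because delete=False

# ... hand edid_file.name to whatever needs a real path on disk ...

os.unlink(edid_file.name)          # cleanup is the caller's job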
Example #6
class TestTrio(object):
    """Test class for testing how the individual class behave"""
    
    def setup_class(self):
        """Setup a standard trio."""
        trio_lines = ['#Standard trio\n', 
                    '#FamilyID\tSampleID\tFather\tMother\tSex\tPhenotype\n', 
                    'healthyParentsAffectedSon\tproband\tfather\tmother\t1\t2\n',
                    'healthyParentsAffectedSon\tmother\t0\t0\t2\t1\n', 
                    'healthyParentsAffectedSon\tfather\t0\t0\t1\t1\n'
                    ]
        self.trio_file = NamedTemporaryFile(mode='w+t', delete=False, suffix='.vcf')
        self.trio_file.writelines(trio_lines)
        self.trio_file.seek(0)
        self.trio_file.close()
        
    
    def test_standard_trio(self):
        """Test if the file is parsed in a correct way."""
        family_parser = parser.FamilyParser(open(self.trio_file.name, 'r'))
        assert family_parser.header == [
                                    'family_id', 
                                    'sample_id', 
                                    'father_id', 
                                    'mother_id', 
                                    'sex', 
                                    'phenotype'
                                    ]
        assert 'healthyParentsAffectedSon' in family_parser.families
        assert set(['proband', 'mother', 'father']) == set(family_parser.families['healthyParentsAffectedSon'].individuals.keys())
        assert set(['proband', 'mother', 'father']) == set(family_parser.families['healthyParentsAffectedSon'].trios[0])
Example #7
def reg_code():
    img, code =generate_code_image((80,30),5)
    session["code"] = code
    tp = NamedTemporaryFile()
    img.save(tp.name,format="png")
    tp.seek(0)
    return send_file(tp.name,mimetype='image/png')
Example #8
def setup_vcf_file():
    """
    Print some variants to a vcf file and return the filename
    """
    vcf_lines = [
        '##fileformat=VCFv4.1\n',
        '##INFO=<ID=MQ,Number=1,Type=Float,Description="RMS Mapping Quality">\n',
        '##contig=<ID=1,length=249250621,assembly=b37>\n',
        '##reference=file:///humgen/gsa-hpprojects/GATK/bundle'\
        '/current/b37/human_g1k_v37.fasta\n',
        '#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tfather\tmother\tproband\n',
        '1\t11900\t.\tA\tT\t100\tPASS\tMQ=1\tGT:GQ\t0/1:60\t0/1:60\t1/1:60\n',
        '1\t879585\t.\tA\tT\t100\tPASS\tMQ=1\tGT:GQ\t0/1:60\t0/0:60\t0/1:60\n',
        '1\t879586\t.\tA\tT\t100\tPASS\tMQ=1\tGT:GQ\t0/0:60\t0/1:60\t0/1:60\n',
        '1\t947378\t.\tA\tT\t100\tPASS\tMQ=1\tGT:GQ\t0/0:60\t0/0:60\t0/1:60\n',
        '1\t973348\t.\tG\tA\t100\tPASS\tMQ=1\tGT:GQ\t0/0:60\t0/0:60\t0/1:60\n',
        '3\t879585\t.\tA\tT\t100\tPASS\tMQ=1\tGT:GQ\t0/1:60\t0/0:60\t0/1:60\n',
        '3\t879586\t.\tA\tT\t100\tPASS\tMQ=1\tGT:GQ\t0/0:60\t0/1:60\t0/1:60\n',
        '3\t947378\t.\tA\tT\t100\tPASS\tMQ=1\tGT:GQ\t0/0:60\t0/0:60\t0/1:60\n',
        '3\t973348\t.\tG\tA\t100\tPASS\tMQ=1\tGT:GQ\t0/0:60\t0/0:60\t0/1:60\n'
        ]
    vcf_file = NamedTemporaryFile(mode='w+t', delete=False, suffix='.vcf')
    vcf_file.writelines(vcf_lines)
    vcf_file.seek(0)
    vcf_file.close()
    
    return vcf_file.name
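Closing the file before handing back its name is what makes this portable: per the tempfile docs, whether the name can be used to open the file a second time while it is still open varies across platforms (it works on Unix, not on Windows), and delete=False keeps the file around after close(). A reduced sketch of the same close-then-reopen pattern:

import os
from tempfile import NamedTemporaryFile

vcf_file = NamedTemporaryFile(mode='w+t', delete=False, suffix='.vcf')
vcf_file.write('##fileformat=VCFv4.1\n')
vcf_file.close()                       # close first, then reopen by name

with open(vcf_file.name) as handle:
    print(handle.readline().rstrip())

os.remove(vcf_file.name)               # delete=False, so remove it explicitly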
Example #9
def write_temp_file(data):
    # create a temp file for use as a config file. This should get cleaned
    # up magically at the end of the run.
    fid = NamedTemporaryFile(mode='w+b', suffix='.tmp')
    fid.write(data)
    fid.seek(0)
    return fid
Example #10
 def test_dump_load(self):
     from tempfile import NamedTemporaryFile
     handle = NamedTemporaryFile()
     self.discodb.dump(handle)
     handle.seek(0)
     discodb = DiscoDB.load(handle)
     self.assertEquals(discodb.dumps(), self.discodb.dumps())
Example #11
def get_subtitle(url, path):
    in_data = urllib2.urlopen(url)
    temp_file = NamedTemporaryFile()

    temp_file.write(in_data.read())
    in_data.close()
    temp_file.seek(0)

    if is_zipfile(temp_file.name):
        zip_file = ZipFile(temp_file)
        for name in zip_file.namelist():
            # don't unzip stub __MACOSX folders
            if '.srt' in name and '__MACOSX' not in name:
                logger.info(' '.join(['Unpacking zipped subtitle', name, 'to', os.path.dirname(path)]))
                zip_file.extract(name, os.path.dirname(path))

        zip_file.close()
    elif is_rarfile(temp_file.name):
        rar_path = path + '.rar'
        logger.info('Saving rared subtitle as %s' % rar_path)
        with open(rar_path, 'w') as out_file:
            out_file.write(temp_file.read())

        try:
            import subprocess
            #extract all .srt in the rared file
            ret_code = subprocess.call(['unrar', 'e', '-n*srt', rar_path])
            if ret_code == 0:
                logger.info('Unpacking rared subtitle to %s' % os.path.dirname(path))
                os.remove(rar_path)
        except OSError:
            logger.info('Unpacking rared subtitle failed.'
                        'Please, install unrar to automate this step.')
    temp_file.close()
Example #12
 def _ufopen(self, _url, _filename ):
     '''Open url and save in tempfile'''
     import urllib
     __t = NamedTemporaryFile(prefix='',suffix= _filename )
     __t.write( urllib.urlopen(_url).read())
     __t.seek(0)
     return __t.name, __t
Example #13
def run_as(user, command, *args, **kwargs):
    """
    Run a command as a particular user, using ``/etc/environment`` and optionally
    capturing and returning the output.

    Raises subprocess.CalledProcessError if command fails.

    :param str user: Username to run command as
    :param str command: Command to run
    :param list args: Additional args to pass to command
    :param dict env: Additional env variables (will be merged with ``/etc/environment``)
    :param bool capture_output: Capture and return output (default: False)
    :param str input: Stdin for command
    """
    parts = [command] + list(args)
    quoted = ' '.join("'%s'" % p for p in parts)
    env = read_etc_env()
    if 'env' in kwargs:
        env.update(kwargs['env'])
    run = check_output if kwargs.get('capture_output') else check_call
    try:
        stdin = None
        if 'input' in kwargs:
            stdin = NamedTemporaryFile()
            stdin.write(kwargs['input'])
            stdin.seek(0)
        return run(['su', user, '-c', quoted], env=env, stdin=stdin)
    finally:
        if stdin:
            stdin.close()  # this also removes tempfile
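The input handling above works because a NamedTemporaryFile has a real file descriptor, so it can be passed straight to the child process as stdin once it has been rewound. A minimal sketch (assumes a POSIX cat; for simple cases subprocess.run(..., input=...) avoids the temp file entirely):

from subprocess import run, PIPE
from tempfile import NamedTemporaryFile

stdin = NamedTemporaryFile()
stdin.write(b'hello from a temp file\n')
stdin.seek(0)                              # rewind so the child reads from the start
result = run(['cat'], stdin=stdin, stdout=PIPE)
print(result.stdout)                       # b'hello from a temp file\n'
stdin.close()                              # closing removes the temp file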
Example #14
def write_temp_file(text, encoding='utf-8', delete=True):
    tmp = NamedTemporaryFile(delete=delete)
    if type(text) == unicode:
        text = text.encode(encoding, 'ignore')
    tmp.write(text)
    tmp.seek(0)
    return File(tmp)
Example #15
 def __init__(self, srv, lookup, userdb, spconf, url, return_to, verification_endpoint="verify", cache=None, bindings=None):
     """
     Constructor for the class.
     :param srv: Usually none, but otherwise the oic server.
     :param return_to: The URL to return to after a successful
     authentication.
     """
     self.userdb = userdb
     if cache is None:
         self.cache_outstanding_queries = {}
     else:
         self.cache_outstanding_queries = cache
     UserAuthnMethod.__init__(self, srv)
     self.return_to = return_to
     self.idp_query_param = "IdpQuery"
     if bindings:
         self.bindings = bindings
     else:
         self.bindings = [BINDING_HTTP_REDIRECT, BINDING_HTTP_POST,
                          BINDING_HTTP_ARTIFACT]
     self.verification_endpoint = verification_endpoint
     #Configurations for the SP handler. (pyOpSamlProxy.client.sp.conf)
     self.sp_conf = importlib.import_module(spconf)
     #self.sp_conf.BASE = self.sp_conf.BASE % url
     ntf = NamedTemporaryFile(suffix="pyoidc.py", delete=True)
     ntf.write("CONFIG = " + str(self.sp_conf.CONFIG).replace("%s", url))
     ntf.seek(0)
     self.sp = Saml2Client(config_file="%s" % ntf.name)
     mte = lookup.get_template("unauthorized.mako")
     argv = {
         "message": "You are not authorized!",
     }
     self.not_authorized = mte.render(**argv)
Example #16
    def _create_zip(self, assignment, zip_rootdir_name):
        tempfile = NamedTemporaryFile()
        zip_file = zipfile.ZipFile(tempfile, 'w');

        for group in self._get_queryset(assignment):
            candidates = self._get_candidates_as_string(group)

            for deadline in group.deadlines.all():
                for delivery in deadline.deliveries.all():
                    for filemeta in delivery.filemetas.all():
                        file_content = filemeta.deliverystore.read_open(filemeta)
                        filenametpl = '{zip_rootdir_name}/{groupname}/deadline-{deadline}/delivery-{delivery_number}/{filename}'
                        if candidates:
                            groupname = '{candidates}_group-{groupid}'.format(
                                candidates=candidates,
                                groupid=group.id
                            )
                        else:
                            groupname = 'group-{groupid}'.format(groupid=group.id)
                        filename = filenametpl.format(zip_rootdir_name=zip_rootdir_name,
                            groupname=groupname,
                            deadline=deadline.deadline.strftime(self.DEADLINE_FORMAT),
                            delivery_number="%.3d" % delivery.number,
                            filename = filemeta.filename.encode(ZIPFILE_FILENAME_CHARSET))
                        zip_file.writestr(filename, file_content.read())
        zip_file.close()

        tempfile.seek(0)
        return tempfile
Example #17
def main():
    """Implement the "rose config-dump" command."""
    opt_parser = RoseOptionParser()
    opt_parser.add_my_options("conf_dir", "files", "no_pretty_mode")
    opts = opt_parser.parse_args()[0]
    verbosity = opts.verbosity - opts.quietness
    report = Reporter(verbosity)
    fs_util = FileSystemUtil(report)
    if opts.conf_dir:
        fs_util.chdir(opts.conf_dir)
    file_names = []
    if opts.files:
        file_names = opts.files
    else:
        for dirpath, _, filenames in os.walk("."):
            for filename in fnmatch.filter(filenames, "rose-*.conf"):
                path = os.path.join(dirpath, filename)[2:]  # remove leading ./
                file_names.append(path)
    for file_name in file_names:
        handle = NamedTemporaryFile()
        node = ConfigLoader()(file_name)
        if (not opts.no_pretty_mode and
                os.path.basename(file_name) != META_CONFIG_NAME):
            pretty_format_config(node, ignore_error=True)
        ConfigDumper()(node, handle)
        handle.seek(0)
        if not filecmp.cmp(handle.name, file_name, shallow=False):
            report(ConfigDumpEvent(file_name))
            ConfigDumper()(node, file_name)
Example #18
def make_image_file(dimensions=(320, 240), extension=".jpeg", force_size=None, orientation=None):
    """
    Yields a named temporary file created with the specified image type and
    options.

    Note the default dimensions are unequal (not a square) ensuring that center-square
    cropping logic will be exercised during tests.

    The temporary file will be closed and deleted automatically upon exiting
    the `with` block.
    """
    image = Image.new('RGB', dimensions, "green")
    image_file = NamedTemporaryFile(suffix=extension)
    try:
        if orientation and orientation in xrange(1, 9):
            exif_bytes = piexif.dump({'0th': {piexif.ImageIFD.Orientation: orientation}})
            image.save(image_file, exif=exif_bytes)
        else:
            image.save(image_file)
        if force_size is not None:
            image_file.seek(0, os.SEEK_END)
            bytes_to_pad = force_size - image_file.tell()
            # write in hunks of 256 bytes
            hunk, byte_ = bytearray([0] * 256), bytearray([0])
            num_hunks, remainder = divmod(bytes_to_pad, 256)
            for _ in xrange(num_hunks):
                image_file.write(hunk)
            for _ in xrange(remainder):
                image_file.write(byte_)
            image_file.flush()
        image_file.seek(0)
        yield image_file
    finally:
        image_file.close()
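The yield inside try/finally and the docstring's mention of a with block suggest this function is used through contextlib.contextmanager (the decorator is not visible in the snippet). A small sketch of that pattern with a hypothetical make_temp_file helper:

from contextlib import contextmanager
from tempfile import NamedTemporaryFile

@contextmanager
def make_temp_file(payload=b''):
    """Hypothetical helper mirroring the structure of make_image_file above."""
    tmp = NamedTemporaryFile(suffix='.bin')
    try:
        tmp.write(payload)
        tmp.flush()
        tmp.seek(0)
        yield tmp
    finally:
        tmp.close()        # closing also deletes the file

with make_temp_file(b'abc') as handle:
    print(handle.read())   # b'abc'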
Example #19
def module(minion, name, *args, **kwargs):
    """
    Execute an arbitrary salt module on dev minion.
    """

    script = '/usr/local/sbin/minion-' + minion
    if not os.access(script, os.X_OK):
        raise exc.CommandNotFoundError("Unknown minion " + minion)

    args = [script, name] + list(args)
    for k, v in kwargs.items():
        if k.startswith('__'):
            continue
        args.append('%s=%s' % (k, v))

    logger.debug("Calling %r", args)

    stderr = NamedTemporaryFile(
        prefix=script + '-' + name + '-', suffix='.stderr')
    child = subprocess.Popen(
        args, stdout=stderr, stderr=stderr)
    child.wait()

    stderr.seek(0)
    out = stderr.read()
    stderr.close()

    if child.returncode != 0:
        raise exc.CommandExecutionError(out)

    return out
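The same descriptor trick works in the other direction: the child writes into the temp file, and the parent rewinds with seek(0) before reading, because the position is at the end once the child has finished. A reduced sketch (assumes a POSIX ls):

import subprocess
from tempfile import NamedTemporaryFile

stderr = NamedTemporaryFile(prefix='cmd-', suffix='.stderr')
child = subprocess.Popen(['ls', '/nonexistent'], stdout=stderr, stderr=stderr)
child.wait()

stderr.seek(0)              # rewind before reading what the child wrote
print(stderr.read())
stderr.close()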
Example #20
def export(request, thegallery):
    """Export a gallery to a zip file and send it to the user.
    """
    # Check if the gallery is valid
    gallery = get_object_or_404(Gallery, pk=thegallery)
    
    # gather up the photos into a new directory
    tmpdir = mkdtemp()
    for photo in gallery.photo_set.all():
        shutil.copy(photo.get_image_filename(),
                        tmpdir)
    files = [ os.path.join(tmpdir, ff) for ff in os.listdir(tmpdir) ]
    outfile = NamedTemporaryFile()
    zf = zipfile.ZipFile(outfile, "w",
                         compression=zipfile.ZIP_DEFLATED)
    for filename in files:
        zf.write(filename, arcname=os.path.basename(filename))
    zf.close()
    outfile.flush()
    outfile.seek(0)
    shutil.rmtree(tmpdir)
    response = HttpResponse(outfile)
    response['Content-Type'] = "application/zip"
    response['Content-Length'] = str(os.stat(outfile.name)[stat.ST_SIZE])
    response['Content-Disposition'] = "attachment; filename=photos.zip"
    return response
Example #21
def export_csv_for_model(model, dataset):
    model_cls = getattr(udata_models, model.capitalize(), None)
    if not model_cls:
        log.error('Unknow model %s' % model)
        return
    queryset = get_queryset(model_cls)
    adapter = csv.get_adapter(model_cls)
    if not adapter:
        log.error('No adapter found for %s' % model)
        return
    adapter = adapter(queryset)

    log.info('Exporting CSV for %s...' % model)

    csvfile = NamedTemporaryFile(delete=False)
    try:
        # write adapter results into a tmp file
        writer = csv.get_writer(csvfile)
        writer.writerow(adapter.header())
        for row in adapter.rows():
            writer.writerow(row)
        csvfile.seek(0)
        # make a resource from this tmp file
        created, resource = store_resource(csvfile, model, dataset)
        # add it to the dataset
        if created:
            dataset.add_resource(resource)
        dataset.last_modified = datetime.now()
        dataset.save()
    finally:
        os.unlink(csvfile.name)
Example #22
def xls_export(request, username, id_string):
    owner = get_object_or_404(User, username=username)
    xform = get_object_or_404(XForm, id_string=id_string, user=owner)
    if not has_permission(xform, owner, request):
        return HttpResponseForbidden(_(u"Not shared."))
    query = request.GET.get("query")
    force_xlsx = request.GET.get("xlsx") == "true"
    xls_df_builder = XLSDataFrameBuilder(username, id_string, query)
    excel_defs = {
        "xls": {"suffix": ".xls", "mime_type": "vnd.ms-excel"},
        "xlsx": {"suffix": ".xlsx", "mime_type": "vnd.openxmlformats"},  # TODO: check xlsx mime type
    }
    ext = "xls" if not force_xlsx else "xlsx"
    if xls_df_builder.exceeds_xls_limits:
        ext = "xlsx"
    try:
        temp_file = NamedTemporaryFile(suffix=excel_defs[ext]["suffix"])
        xls_df_builder.export_to(temp_file.name)

        if request.GET.get("raw"):
            id_string = None
        response = response_with_mimetype_and_name(excel_defs[ext]["mime_type"], id_string, extension=ext)
        response.write(temp_file.read())
        temp_file.seek(0, os.SEEK_END)
        response["Content-Length"] = temp_file.tell()
        temp_file.close()
        return response
    except NoRecordsFoundError:
        return HttpResponse(_("No records found to export"))
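Seeking to the end and calling tell() is one way to get the byte count for Content-Length; os.path.getsize(temp_file.name) gives the same number once the data has been flushed. A minimal sketch of the equivalence:

import os
from tempfile import NamedTemporaryFile

temp_file = NamedTemporaryFile(suffix='.xlsx')
temp_file.write(b'x' * 1024)
temp_file.flush()

temp_file.seek(0, os.SEEK_END)
assert temp_file.tell() == os.path.getsize(temp_file.name) == 1024
temp_file.close()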
Example #23
 def export(self, out_f=None, format='mp3'):
     out_f = _fd_or_path_or_tempfile(out_f, 'wb+')
     out_f.seek(0)
     data = NamedTemporaryFile(mode="wb", delete=False)
     
     wave_data = wave.open(data)
     wave_data.setnchannels(self.channels)
     wave_data.setsampwidth(self.sample_width)
     wave_data.setframerate(self.frame_rate)
     wave_data.setnframes(self.frame_count())
     wave_data.writeframesraw(self._data)
     wave_data.close()
     
     
     output = NamedTemporaryFile(mode="w+")
     
     # read stdin / write stdout
     subprocess.call(['ffmpeg', 
                      '-y', # always overwrite existing files
                      "-f", "wav", "-i", data.name, # input options (filename last)
                      "-f", format, output.name, # output options (filename last)
                      ], 
                     
                     # make ffmpeg shut up
                     stderr=open(os.devnull))
     
     output.seek(0)
     out_f.write(output.read())
     
     data.unlink(data.name)
     out_f.seek(0)
     return out_f
Example #24
def main():
    """Implement the "rose config-dump" command."""
    opt_parser = RoseOptionParser()
    opt_parser.add_my_options("conf_dir", "files", "no_pretty_mode")
    opts, args = opt_parser.parse_args()
    verbosity = opts.verbosity - opts.quietness
    report = Reporter(verbosity)
    fs_util = FileSystemUtil(report)
    if opts.conf_dir:
        fs_util.chdir(opts.conf_dir)
    file_names = []
    if opts.files:
        file_names = opts.files
    else:
        for dirpath, dirnames, filenames in os.walk("."):
            for filename in fnmatch.filter(filenames, "rose-*.conf"):
                p = os.path.join(dirpath, filename)[2:] # remove leading ./
                file_names.append(p)
    for file_name in file_names:
        t = NamedTemporaryFile()
        node = ConfigLoader()(file_name)
        if not opts.no_pretty_mode:
            pretty_format_config(node)
        ConfigDumper()(node, t)
        t.seek(0)
        if not filecmp.cmp(t.name, file_name, shallow=False):
            report(ConfigDumpEvent(file_name))
            ConfigDumper()(node, file_name)
Example #25
    def test_merge_sam():
        'It merges two sams'
        reference = NamedTemporaryFile(suffix='.sam')
        reference.write('''>SGN-U572743
atatata
>SGN-U576692
gcgc''')
        sam1 = NamedTemporaryFile(suffix='.sam')
        sam1.write('''@SQ	SN:SGN-U576692	LN:1714
@SQ	SN:SGN-U572743	LN:833
@RG	ID:g1	LB:g1	SM:g1
@RG	ID:g2	LB:g2	SM:g2
SGN-E221403	0	SGN-U576692	1416	207	168M	*	0	0	AGCCTGATAAAGGTCTGCCTACGTGTTTTAAGTGGAATCCGTTTCCCCATGTCCAAACCTTCTAAATAGTTTTTTGTGTTAGTTCTTGTATGCCACATACAAAAATTAACAAACTCTTTTGCCACATATGTTCCAGCACGTCAAAGCAACATGTATTTGAGCTACTTT	558<///035EB@;550300094>>FBF>>88>BBB200>@FFMMMJJ@@755225889>0..14444::FMF@@764444448@;;84444<//,4,.,<<QFBB;::/,,,.69FBB>9:2/.409;@@>88.7,//55;BDK@11,,093777777884241<:7	AS:i:160	XS:i:0	XF:i:3	XE:i:4	XN:i:0	RG:Z:g2
SGN-E221664	0	SGN-U572743	317	226	254M24S	*	0	0	GGATGATCTTAGAGCTGCCATTCAAAAGATGTTAGACACTCCTGGGCCATACTTGTTGGATGTGATTGTACCTCATCAGGAGCATGTTCTACCGATGATTCCCAGTGGCGGTGCTTTCAAAAATGTGATTACGGAGGGTGATGGGAGACGTTCCTATTGACTTTGAGAAGCTACATAACTAGTTCAAGGCATTGTATTATCTAAAATAAACTTAATATTTATGTTTACTTAAAAGTTTTTCATTGTGTGAAGGAAAAAAAAAAAAAAAAAAAAAAAAA	999@7<22-2***-,206433>:?9<,,,66:>00066=??EEAAA?B200002<<@@@=DB99777864:..0::@833099???<@488>></...<:B<<.,,8881288@BBDDBD885@@;;9:/9.,,,99B99233885558=?DKKKDDAA??DKBB=440/0<8?DEDFBB??6@152@@FBMFIIDDDDDDKKKOK@@@@DD:N688BBDDDBBBKKDEDDBN977?<9<111:<??==BKMPKKBB==99>QQYYYYYYYYYYYYQQ	AS:i:250	XS:i:0	XF:i:0	XE:i:7	XN:i:0	RG:Z:g1
''')
        sam1.flush()
        sam2 = NamedTemporaryFile(suffix='.sam')
        sam2.write('''@SQ	SN:SGN-U576692	LN:1714
@SQ	SN:SGN-U572743	LN:833
@RG	ID:g1	LB:g1	SM:g1
@RG	ID:g3	LB:g3	SM:g3
SGN-E200000	0	SGN-U572743	317	226	254M24S	*	0	0	GGATGATCTTAGAGKTGCCATTCAAAAGATGTTAGACACTCCTGGGCCATACTTGTTGGATGTGATTGTACCTCATCAGGAGCATGTTCTACCGATGATTCCCAGTGGCGGTGCTTTCAAAAATGTGATTACGGAGGGTGATGGGAGACGTTCCTATTGACTTTGAGAAGCTACATAACTAGTTCAAGGCATTGTATTATCTAAAATAAACTTAATATTTATGTTTACTTAAAAGTTTTTCATTGTGTGAAGGAAAAAAAAAAAAAAAAAAAAAAAAA	999@7<22-2***-,206433>:?9<,,,66:>00066=??EEAAA?B200002<<@@@=DB99777864:..0::@833099???<@488>></...<:B<<.,,8881288@BBDDBD885@@;;9:/9.,,,99B99233885558=?DKKKDDAA??DKBB=440/0<8?DEDFBB??6@152@@FBMFIIDDDDDDKKKOK@@@@DD:N688BBDDDBBBKKDEDDBN977?<9<111:<??==BKMPKKBB==99>QQYYYYYYYYYYYYQQ	AS:i:250	XS:i:0	XF:i:0	XE:i:7	XN:i:0	RG:Z:g1
SGN-E40000	0	SGN-U576692	1416	207	168M	*	0	0	AGCCTGATAAAGGTCTGCCTACGTGTTTTAAGTGGAATCCGTTTCCCCATGTCCAAACCTTCTAAATAGTTTTTTGTGTTAGTTCTTGTATGCCACATACAAAAATTAACAAACTCTTTTGCCACATATGTTCCAGCACGTCAAAGCAACATGTATTTGAGCTACTTT	558<///035EB@;550300094>>FBF>>88>BBB200>@FFMMMJJ@@755225889>0..14444::FMF@@764444448@;;84444<//,4,.,<<QFBB;::/,,,.69FBB>9:2/.409;@@>88.7,//55;BDK@11,,093777777884241<:7	AS:i:160	XS:i:0	XF:i:3	XE:i:4	XN:i:0	RG:Z:g3
''')
        sam2.flush()
        sam3 = NamedTemporaryFile(suffix='.sam')
        merge_sam(infiles=[sam1, sam2], outfile=sam3, reference=reference)
        sam3.seek(0)
        sam3_content = sam3.read()

        assert 'SN:SGN-U572743' in sam3_content
        assert 'SGN-E200000' in sam3_content
        assert 'SGN-E221664' in sam3_content
Example #26
    def get(self, request, deliveryid):
        delivery = get_object_or_404(Delivery, id=deliveryid)
        assignment_group = delivery.deadline.assignment_group
        if not (assignment_group.is_candidate(request.user) \
                    or assignment_group.is_examiner(request.user) \
                    or request.user.is_superuser \
                    or assignment_group.parentnode.is_admin(request.user)):
            return HttpResponseForbidden("Forbidden")
        dirname = '{}-{}-delivery{}'.format(
                assignment_group.parentnode.get_path(),
                assignment_group.get_candidates(separator='_'),
                delivery.number)
        zip_file_name = u'{}.zip'.format(dirname)

        tempfile = NamedTemporaryFile()
        zip_file = zipfile.ZipFile(tempfile, 'w');

        for filemeta in delivery.filemetas.all():
            file_content = filemeta.deliverystore.read_open(filemeta)
            zip_file.write(file_content.name, posixpath.join(dirname, filemeta.filename))
        zip_file.close()

        tempfile.seek(0)
        response = HttpResponse(FileWrapperWithExplicitClose(tempfile),
                                content_type="application/zip")
        response['Content-Disposition'] = "attachment; filename=%s" % \
            zip_file_name.encode("ascii", 'replace')
        response['Content-Length'] = stat(tempfile.name).st_size
        return response
Example #27
    def decrypt(self, enctext):
        """ Decrypting an encrypted text by the use of a private key.

        :param enctext: The encrypted text as a string
        :return: The decrypted text
        """

        if self.log:
            self.log.info("input len: %d" % len(enctext))
        _, fil = make_temp("%s" % enctext, decode=False)
        ntf = NamedTemporaryFile(delete=not DEBUG)

        com_list = [self.xmlsec, "--decrypt",
                     "--privkey-pem", self.key_file,
                     "--output", ntf.name,
                     "--id-attr:%s" % ID_ATTR, ENC_KEY_CLASS,
                     fil]

        if self.debug:
            self.log.debug("Decrypt command: %s" % " ".join(com_list))

        pof = Popen(com_list, stderr=PIPE, stdout=PIPE)
        p_out = pof.stdout.read()
        p_err = pof.stderr.read()

        if self.debug:
            self.log.debug("Decrypt result (out): %s" % (p_out,))
            self.log.debug("Decrypt result (err): %s" % (p_err,))

        ntf.seek(0)
        return ntf.read()
Example #28
    def handle_say(self, engine, message, cmd, args, lang='sonid26', voice='Alyona'):
        phrase = args.strip().encode('utf-8')
        if not len(phrase):
            raise Exception(
                'Введіть, що сказати!'
                'Приклад: /report Мегас поц'
            )
        tk = gtts_token.Token().calculate_token(phrase)
        query = urllib.urlencode(dict(
            MyLanguages=lang,
            MySelectedVoice=voice,
            MyTextForTTS=phrase,
            t=1,
            SendToVaaS=''
            # q=phrase,
            # ie='UTF-8',
            # tl=lang,
            # total=1,
            # textlen=len(phrase),
            # tk=tk,
            # client='t',
            # ttsspeed=0.5
        ))
        url = 'http://www.acapela-group.com/demo-tts/DemoHTML5Form_V2.php'

        request = urllib2.Request(url, query)
        request.add_header('Referer', 'http://www.acapela-group.com/')
        # request.add_header('Accept-Encoding', 'identity;q=1, *;q=0')
        request.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36')
        # request.add_header('Range', 'bytes=0-')
        response = urllib2.urlopen(request)

        html = response.read()

        url = re.findall('http.*\.mp3', html)
        if not len(url):
            raise Exception('API послав нас нах :/')
        url = url[0]

        response = urllib2.urlopen(url.strip())
        data = response.read()

        af = NamedTemporaryFile(delete=False, suffix='.mp3')
        af.write(data)
        af.seek(0)

        af = open(af.name, 'rb')

        try:
            engine.telegram.sendAudio(
                chat_id=message.chat_id,
                audio=af,
                title=phrase
            )
            os.unlink(af.name)
            return True
        except:
            os.unlink(af.name)
            raise
        return True
Example #29
def src_file():
    src = NamedTemporaryFile()
    src.write(data)
    src.flush()
    src.seek(0, SEEK_SET)

    return src
Example #30
 def test_basic_behaviour(self):
     'It tests that we can get names using a FileNamingSchema'
     fhand = NamedTemporaryFile()
     engine = sqlalchemy.create_engine('sqlite:///:memory:')
     create_naming_database(engine)
     add_project_to_naming_database(engine, name='my_project', code='my',
                                    description='a test project')
     naming = DbNamingSchema(engine, project='my_project',
                             feature_kind='EST')
     naming = FileNamingSchema(fhand, naming)
     assert naming.get_uniquename(name='hola') == 'myES000001'
     assert naming.get_uniquename(name='hola') == 'myES000001'
     assert naming.get_uniquename(name='caracol') == 'myES000002'
     naming.commit()
     fhand.seek(0)
     naming = FileNamingSchema(fhand)
     naming.kind = 'EST'
     assert naming.get_uniquename(name='hola') == 'myES000001'
     assert naming.get_uniquename(name='caracol') == 'myES000002'
     try:
         assert naming.get_uniquename(name='pascual')
         self.fail()
         #pylint: disable-msg=W0704
     except ValueError:
         pass
Example #31
def FFT(job):
    rawFileHash = job.get('hash')
    segmentHash = job.data['segment-hash']

    # WAV: The number of channel is contained in the shape of data (len, numChannels)
    sampleFrequency, rdata = read(
        filepath.GetSegmentFromHash(rawFileHash, segmentHash))
    # Convert to complex for the FFT
    data = rdata.astype(np.complex_)

    fftSampleFrenquency = 80
    fftSampleLength = math.ceil(0.25 * sampleFrequency)
    hamming = np.reshape(np.repeat(np.hamming(fftSampleLength), data.shape[1]),
                         (fftSampleLength, 2))
    # Number of FFT to compute
    total = math.floor(
        (len(data) - fftSampleLength) * fftSampleFrenquency / sampleFrequency)

    # We don't have the file hash yet
    filepath.EnsureExists('/data/temp/file')
    file = NamedTemporaryFile(mode='w+b', dir='/data/temp', delete=False)
    # File header, [number of FFT Samples, FFT sample length, original file sample frequency, FFT sample frequency]
    file.write(np.zeros(4, dtype=np.int32).tobytes())

    print('[fft] Data shape:', data.shape)

    # Start the job
    job.start(total)

    # Debug purpose only
    allData = np.array([], dtype=np.float_)

    index = 0
    while math.ceil(index * sampleFrequency /
                    fftSampleFrenquency) + fftSampleLength <= len(data):
        s = math.ceil(index * sampleFrequency / fftSampleFrenquency)
        e = math.ceil(
            index * sampleFrequency / fftSampleFrenquency) + fftSampleLength

        fftData = spfft.fft(data[s:e] * hamming)
        fftPowerData = np.sum(np.absolute(fftData, dtype=np.float_), axis=1)
        fftPowerData = 1.0 / fftSampleLength * fftPowerData**2

        # TODO: We could ignore the later half of the data (negative frequencies) as it's symmetric for FFT of real signals
        file.write(fftPowerData.tobytes())
        # print(file.tell(), np.amax(fftPowerData))

        # Debug purpose only
        allData = np.append(allData, fftPowerData)

        index = index + 1

        job.update(index)

    # Debug purpose only
    plt.imshow(np.reshape(allData, (fftSampleLength, -1)), aspect='auto')
    filepath.EnsureExists(
        filepath.GetFFTFromHash(rawFileHash, segmentHash) + '.svg')
    plt.savefig(filepath.GetFFTFromHash(rawFileHash, segmentHash) + '.svg')

    file.seek(0)
    file.write(
        np.array(
            [index, fftSampleLength, sampleFrequency, fftSampleFrenquency],
            dtype=np.int32).tobytes())
    file.close()

    print('[fft] Processing complete')

    print('[fft] Finalizing...')

    # Rename the temporary file
    fftHash = filepath.Hash(file.name)
    filename = filepath.GetFFTFromHash(rawFileHash, fftHash)

    filepath.EnsureExists(filename)
    os.rename(file.name, filename)

    job.data['fft-hash'] = fftHash
    job.complete()
    redis.open().publish('JOB-UPDATE-FFT', job.serialize())

    print('[fft] [SUCCESS] Job', rawFileHash, '-', segmentHash)
    print('[fft] [SUCCESS] Job result', rawFileHash, '-', fftHash)
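Writing a block of zeros first, streaming the payload, then seeking back to offset 0 to backfill the real header is a common trick when the counts are only known at the end; the example above does exactly this with a 4-int32 header. A minimal sketch of the backfill pattern (assumes NumPy, as used above):

import os
import numpy as np
from tempfile import NamedTemporaryFile

out = NamedTemporaryFile(delete=False)
out.write(np.zeros(4, dtype=np.int32).tobytes())   # reserve a fixed-size header of zeros

body = np.arange(10, dtype=np.float64)
out.write(body.tobytes())                          # stream the payload after the header

out.seek(0)                                        # jump back to the start ...
out.write(np.array([len(body), 0, 0, 0], dtype=np.int32).tobytes())  # ... and backfill it
out.close()
os.unlink(out.name)                                # delete=False, so clean up explicitly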
Example #32
    def export(self,
               out_f=None,
               format='mp3',
               codec=None,
               bitrate=None,
               parameters=None,
               tags=None,
               id3v2_version='4',
               cover=None):
        """
        Export an AudioSegment to a file with given options

        out_f (string):
            Path to destination audio file. Also accepts os.PathLike objects on
            python >= 3.6

        format (string)
            Format for destination audio file.
            ('mp3', 'wav', 'raw', 'ogg' or other ffmpeg/avconv supported files)

        codec (string)
            Codec used to encode the destination file.

        bitrate (string)
            Bitrate used when encoding destination file. (64, 92, 128, 256, 312k...)
            Each codec accepts different bitrate arguments so take a look at the
            ffmpeg documentation for details (bitrate usually shown as -b, -ba or
            -a:b).

        parameters (list of strings)
            Aditional ffmpeg/avconv parameters

        tags (dict)
            Set metadata information to destination files
            usually used as tags. ({title='Song Title', artist='Song Artist'})

        id3v2_version (string)
            Set ID3v2 version for tags. (default: '4')

        cover (file)
            Set cover for audio file from image file. (png or jpg)
        """
        id3v2_allowed_versions = ['3', '4']

        if format == "raw" and (codec is not None or parameters is not None):
            raise AttributeError(
                'Can not invoke ffmpeg when export format is "raw"; '
                'specify an ffmpeg raw format like format="s16le" instead '
                'or call export(format="raw") with no codec or parameters')

        out_f, _ = _fd_or_path_or_tempfile(out_f, 'wb+')
        out_f.seek(0)

        if format == "raw":
            out_f.write(self._data)
            out_f.seek(0)
            return out_f

        # wav with no ffmpeg parameters can just be written directly to out_f
        easy_wav = format == "wav" and codec is None and parameters is None

        if easy_wav:
            data = out_f
        else:
            data = NamedTemporaryFile(mode="wb", delete=False)

        pcm_for_wav = self._data
        if self.sample_width == 1:
            # convert to unsigned integers for wav
            pcm_for_wav = audioop.bias(self._data, 1, 128)

        wave_data = wave.open(data, 'wb')
        wave_data.setnchannels(self.channels)
        wave_data.setsampwidth(self.sample_width)
        wave_data.setframerate(self.frame_rate)
        # For some reason packing the wave header struct with
        # a float in python 2 doesn't throw an exception
        wave_data.setnframes(int(self.frame_count()))
        wave_data.writeframesraw(pcm_for_wav)
        wave_data.close()

        # for easy wav files, we're done (wav data is written directly to out_f)
        if easy_wav:
            out_f.seek(0)
            return out_f

        output = NamedTemporaryFile(mode="w+b", delete=False)

        # build converter command to export
        conversion_command = [
            self.converter,
            '-y',  # always overwrite existing files
            "-f",
            "wav",
            "-i",
            data.name,  # input options (filename last)
        ]

        if codec is None:
            codec = self.DEFAULT_CODECS.get(format, None)

        if cover is not None:
            if cover.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp', '.tif',
                                       '.tiff')) and format == "mp3":
                conversion_command.extend(
                    ["-i", cover, "-map", "0", "-map", "1", "-c:v", "mjpeg"])
            else:
                raise AttributeError(
                    "Currently cover images are only supported by MP3 files. The allowed image formats are: .tif, .jpg, .bmp, .jpeg and .png."
                )

        if codec is not None:
            # force audio encoder
            conversion_command.extend(["-acodec", codec])

        if bitrate is not None:
            conversion_command.extend(["-b:a", bitrate])

        if parameters is not None:
            # extend arguments with arbitrary set
            conversion_command.extend(parameters)

        if tags is not None:
            if not isinstance(tags, dict):
                raise InvalidTag("Tags must be a dictionary.")
            else:
                # Extend converter command with tags
                # print(tags)
                for key, value in tags.items():
                    conversion_command.extend(
                        ['-metadata', '{0}={1}'.format(key, value)])

                if format == 'mp3':
                    # set id3v2 tag version
                    if id3v2_version not in id3v2_allowed_versions:
                        raise InvalidID3TagVersion(
                            "id3v2_version not allowed, allowed versions: %s" %
                            id3v2_allowed_versions)
                    conversion_command.extend(
                        ["-id3v2_version", id3v2_version])

        if sys.platform == 'darwin' and codec == 'mp3':
            conversion_command.extend(["-write_xing", "0"])

        conversion_command.extend([
            "-f",
            format,
            output.name,  # output options (filename last)
        ])

        log_conversion(conversion_command)

        # read stdin / write stdout
        with open(os.devnull, 'rb') as devnull:
            p = subprocess.Popen(conversion_command,
                                 stdin=devnull,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        p_out, p_err = p.communicate()

        log_subprocess_output(p_out)
        log_subprocess_output(p_err)

        if p.returncode != 0:
            raise CouldntEncodeError(
                "Encoding failed. ffmpeg/avlib returned error code: {0}\n\nCommand:{1}\n\nOutput from ffmpeg/avlib:\n\n{2}"
                .format(p.returncode, conversion_command,
                        p_err.decode(errors='ignore')))

        output.seek(0)
        out_f.write(output.read())

        data.close()
        output.close()

        os.unlink(data.name)
        os.unlink(output.name)

        out_f.seek(0)
        return out_f
Example #33
import requests
import shutil
import os.path
from glob import glob
from tempfile import NamedTemporaryFile

DATA_URL = 'http://stash.compjour.org/data/sunlight-twitter-opener.zip'
DATA_DIR = 'data-hold'

tzip = NamedTemporaryFile(suffix='.zip')
print("Downloading", DATA_URL)
r = requests.get(DATA_URL)
tzip.write(r.content)
tzip.seek(0)
print("Unzipping to", DATA_DIR)
shutil.unpack_archive(tzip.name, DATA_DIR)
tzip.close()

csvname = os.path.join(DATA_DIR, 'sunlight_legislators.csv')
print(csvname, 'has', os.path.getsize(csvname), 'bytes')
print("Tweet files:", len(glob('data-hold/tweets/*.json')))
print("Profile files:", len(glob('data-hold/profiles/*.json')))
Example #34
    def write_inputs(
        self,
        X: "npt.ArrayLike",
        y: "npt.ArrayLike",
        groups: "Optional[npt.ArrayLike]" = None,
        covariates: "Optional[npt.ArrayLike]" = None,
    ) -> "Iterator":
        from tempfile import NamedTemporaryFile
        X_file = NamedTemporaryFile(mode="w+")
        y_file = NamedTemporaryFile(mode="w+")

        if (covariates is not None) or (groups is not None):
            cov_file: "Optional[IO[str]]" = NamedTemporaryFile(mode="w+")
        else:
            cov_file = None

        try:
            #  self._check_is_singular(np.asarray(X))
            X_ = self.genos_to_bimbam(X)
            X_.to_csv(X_file, index=False, header=False, na_rep="NA")
            X_file.seek(0)

            y_ = np.asarray(y)
            if len(y_.shape) == 1:
                y_ = np.expand_dims(y_, -1)

            pd.DataFrame(y_).to_csv(
                y_file,
                index=False,
                header=False,
                na_rep="NA"
            )
            y_file.seek(0)

            cov: "List[np.ndarray]" = []

            if groups is not None:
                cov.append(self.prep_groups(groups))

            if covariates is not None:
                cov.append(np.asarray(covariates))

            if len(cov) > 0:
                """
                We add a column of ones to include an intercept term.
                """

                assert cov_file is not None
                cov_ = np.concatenate(
                    [np.ones((y_.shape[0], 1)), *cov],
                    axis=1
                )
                pd.DataFrame(cov_).to_csv(
                    cov_file,
                    index=False,
                    header=False,
                    na_rep="NA"
                )
                cov_file.seek(0)

            del X_
            del y_
            del cov

            yield X_file, y_file, cov_file
        finally:
            X_file.close()
            y_file.close()

            if cov_file is not None:
                cov_file.close()

        return
Example #35
class Warc(object):
    MAIN_URL = "WARC-X-Main-URL"

    _main_url = None
    _file_name = None
    _warc_file_read = None
    _warc_file_write = None
    _temporary = None
    _read_only = False

    def __init__(self, file_name, temporary=False, read_only=False, **kwargs):
        self._main_url = kwargs.get("main_url")
        self._file_name = file_name
        self._temporary = temporary
        self._read_only = read_only if not self._temporary else False

        if self._temporary:
            if not self._main_url:
                raise ValueError("Missing required argument: main_url")

            self._warc_file_read = NamedTemporaryFile("rb")
            self._warc_file_write = open(self._warc_file_read.name, "wb")

            self._init_file()

        else:
            if self._read_only:
                self._warc_file_read = open(file_name, "rb")

            else:
                self._warc_file_read = open(file_name, "rb")
                self._warc_file_write = open(file_name, "ab")

    def find_record(self, url):
        self._warc_file_read.seek(0)
        wrs = WarcRecord.open_archive(file_handle=self._warc_file_read, \
                gzip="record")

        for (offset, record, errors) in wrs.read_records(limit=None):
            if record and (record.type == WarcRecord.RESPONSE) \
                    and (record.content[0] == ResponseMessage.CONTENT_TYPE) \
                    and (record.url == url):
                return record

        return None

    def write_record(self, record):
        if self._read_only:
            raise RuntimeError("WARC opened for read-only access")

        self._warc_file_write.seek(0, os.SEEK_END)
        record.write_to(self._warc_file_write, gzip=True)
        self._warc_file_write.flush()

    def make_permanent(self):
        if not self._temporary:
            raise RuntimeError("This WARC is not temporary")

        warc_file = open(self._file_name, "wb")
        self._warc_file_read.seek(0)

        # copy temp file to it's permanent location
        shutil.copyfileobj(self._warc_file_read, warc_file)
        warc_file.flush()

        self._warc_file_read = open(self._file_name, "rb")
        self._warc_file_write = warc_file

        self._temporary = False

    @property
    def main_url(self):
        return self._main_url

    @property
    def temporary(self):
        return self._temporary

    @property
    def read_only(self):
        return self._read_only

    def _init_file(self):
        warcinfo_headers = [
            (WarcRecord.TYPE, WarcRecord.WARCINFO),
            (WarcRecord.ID, WarcRecord.random_warc_uuid()),
            (WarcRecord.DATE, warc.warc_datetime_str(datetime.utcnow())),
            (WarcRecord.FILENAME, os.path.basename(self._file_name)),
            (Warc.MAIN_URL, self._main_url),
        ]

        warcinfo_fields = "\r\n".join([
            "software: bardo",
            "format: WARC File Format 1.0",
            "conformsTo: " + CONFORMS_TO,
            "robots: unknown",
        ])

        warcinfo_content = ("application/warc-fields", warcinfo_fields)

        warcinfo_record = WarcRecord(headers=warcinfo_headers, \
                content=warcinfo_content)

        self.write_record(warcinfo_record)

    def _load_warc_info(self):
        self._warc_file_read.seek(0)
        wrs = WarcRecord.open_archive(file_handle=self._warc_file_read, \
                gzip="record")
        temp = wrs.read_records(limit=1)

        if not temp or (temp[0].type != WarcRecord.WARCINFO):
            raise ValueError("WARC info not found")

        return temp[0]
Example #36
class LazyZipOverHTTP:
    """File-like object mapped to a ZIP file over HTTP.

    This uses HTTP range requests to lazily fetch the file's content,
    which is supposed to be fed to ZipFile.  If such requests are not
    supported by the server, raise HTTPRangeRequestUnsupported
    during initialization.
    """
    def __init__(self, url, session, chunk_size=CONTENT_CHUNK_SIZE):
        # type: (str, PipSession, int) -> None
        head = session.head(url, headers=HEADERS)
        raise_for_status(head)
        assert head.status_code == 200
        self._session, self._url, self._chunk_size = session, url, chunk_size
        self._length = int(head.headers['Content-Length'])
        self._file = NamedTemporaryFile()
        self.truncate(self._length)
        self._left = []  # type: List[int]
        self._right = []  # type: List[int]
        if 'bytes' not in head.headers.get('Accept-Ranges', 'none'):
            raise HTTPRangeRequestUnsupported('range request is not supported')
        self._check_zip()

    @property
    def mode(self):
        # type: () -> str
        """Opening mode, which is always rb."""
        return 'rb'

    @property
    def name(self):
        # type: () -> str
        """Path to the underlying file."""
        return self._file.name

    def seekable(self):
        # type: () -> bool
        """Return whether random access is supported, which is True."""
        return True

    def close(self):
        # type: () -> None
        """Close the file."""
        self._file.close()

    @property
    def closed(self):
        # type: () -> bool
        """Whether the file is closed."""
        return self._file.closed

    def read(self, size=-1):
        # type: (int) -> bytes
        """Read up to size bytes from the object and return them.

        As a convenience, if size is unspecified or -1,
        all bytes until EOF are returned.  Fewer than
        size bytes may be returned if EOF is reached.
        """
        download_size = max(size, self._chunk_size)
        start, length = self.tell(), self._length
        stop = length if size < 0 else min(start + download_size, length)
        start = max(0, stop - download_size)
        self._download(start, stop - 1)
        return self._file.read(size)

    def readable(self):
        # type: () -> bool
        """Return whether the file is readable, which is True."""
        return True

    def seek(self, offset, whence=0):
        # type: (int, int) -> int
        """Change stream position and return the new absolute position.

        Seek to offset relative position indicated by whence:
        * 0: Start of stream (the default).  pos should be >= 0;
        * 1: Current position - pos may be negative;
        * 2: End of stream - pos usually negative.
        """
        return self._file.seek(offset, whence)

    def tell(self):
        # type: () -> int
        """Return the current possition."""
        return self._file.tell()

    def truncate(self, size=None):
        # type: (Optional[int]) -> int
        """Resize the stream to the given size in bytes.

        If size is unspecified resize to the current position.
        The current stream position isn't changed.

        Return the new file size.
        """
        return self._file.truncate(size)

    def writable(self):
        # type: () -> bool
        """Return False."""
        return False

    def __enter__(self):
        # type: () -> LazyZipOverHTTP
        self._file.__enter__()
        return self

    def __exit__(self, *exc):
        # type: (*Any) -> Optional[bool]
        return self._file.__exit__(*exc)

    @contextmanager
    def _stay(self):
        # type: ()-> Iterator[None]
        """Return a context manager keeping the position.

        At the end of the block, seek back to original position.
        """
        pos = self.tell()
        try:
            yield
        finally:
            self.seek(pos)

    def _check_zip(self):
        # type: () -> None
        """Check and download until the file is a valid ZIP."""
        end = self._length - 1
        for start in reversed(range(0, end, self._chunk_size)):
            self._download(start, end)
            with self._stay():
                try:
                    # For read-only ZIP files, ZipFile only needs
                    # methods read, seek, seekable and tell.
                    ZipFile(self)  # type: ignore
                except BadZipfile:
                    pass
                else:
                    break

    def _stream_response(self, start, end, base_headers=HEADERS):
        # type: (int, int, Dict[str, str]) -> Response
        """Return HTTP response to a range request from start to end."""
        headers = base_headers.copy()
        headers['Range'] = f'bytes={start}-{end}'
        # TODO: Get range requests to be correctly cached
        headers['Cache-Control'] = 'no-cache'
        return self._session.get(self._url, headers=headers, stream=True)

    def _merge(self, start, end, left, right):
        # type: (int, int, int, int) -> Iterator[Tuple[int, int]]
        """Return an iterator of intervals to be fetched.

        Args:
            start (int): Start of needed interval
            end (int): End of needed interval
            left (int): Index of first overlapping downloaded data
            right (int): Index after last overlapping downloaded data
        """
        lslice, rslice = self._left[left:right], self._right[left:right]
        i = start = min([start] + lslice[:1])
        end = max([end] + rslice[-1:])
        for j, k in zip(lslice, rslice):
            if j > i:
                yield i, j - 1
            i = k + 1
        if i <= end:
            yield i, end
        self._left[left:right], self._right[left:right] = [start], [end]

    def _download(self, start, end):
        # type: (int, int) -> None
        """Download bytes from start to end inclusively."""
        with self._stay():
            left = bisect_left(self._right, start)
            right = bisect_right(self._left, end)
            for start, end in self._merge(start, end, left, right):
                response = self._stream_response(start, end)
                response.raise_for_status()
                self.seek(start)
                for chunk in response_chunks(response, self._chunk_size):
                    self._file.write(chunk)
Example #37
class S3KeyWritableFileObject(RawIOBase):
    def __init__(
            self, object_summery, *,
            path,
            mode='w',
            buffering=DEFAULT_BUFFER_SIZE,
            encoding=None,
            errors=None,
            newline=None):
        super().__init__()
        self.object_summery = object_summery
        self.path = path
        self.mode = mode
        self.buffering = buffering
        self.encoding = encoding
        self.errors = errors
        self.newline = newline
        self._cache = NamedTemporaryFile(
            mode=self.mode + '+' if 'b' in self.mode else 'b' + self.mode + '+',
            buffering=self.buffering,
            encoding=self.encoding,
            newline=self.newline)
        self._string_parser = partial(_string_parser, mode=self.mode, encoding=self.encoding)

    def __getattr__(self, item):
        try:
            return getattr(self._cache, item)
        except AttributeError:
            return super().__getattribute__(item)

    def writable_check(method):
        @wraps(method)
        def wrapper(self, *args, **kwargs):
            if not self.writable():
                raise UnsupportedOperation('not writable')
            return method(self, *args, **kwargs)
        return wrapper

    def writable(self, *args, **kwargs):
        return 'w' in self.mode

    @writable_check
    def write(self, text):
        self._cache.write(self._string_parser(text))
        self._cache.seek(0)
        _s3_accessor.boto3_method_with_parameters(
            self.object_summery.put,
            kwargs={'Body': self._cache}
        )

    def writelines(self, lines):
        if not lines:
            return
        if isinstance(lines[0], bytes):
            joined = b"".join(lines)
        else:
            joined = "".join(lines)
        self.write(joined)

    def readable(self):
        return False

    def read(self, *args, **kwargs):
        raise UnsupportedOperation('not readable')

    def readlines(self, *args, **kwargs):
        raise UnsupportedOperation('not readable')
Example #38
def main(input_file, output_file, debug):
    if input_file == '-':
        if debug:
            raise BaseException("cannot run --debug with stdin input")
        input_file = sys.stdin
    else:
        input_file = open(input_file)

    # store / filter input
    tmpf = NamedTemporaryFile()
    # created digest
    output = NamedTemporaryFile()

    # mailbox needs filename :( that's breaking duck typing!
    # with below From hack I could probably split msgs myself altogether
    for line in input_file:
        # mutt pipe-to doesn't do mbox format :( this will break soon :)
        if line.startswith('Delivered-To: '):
            tmpf.write('From [email protected] Thu Aug 15 16:24:28 2019\n')
        tmpf.write(line)
    tmpf.flush()
    tmpf.seek(0)

    mbox = mailbox.mbox(tmpf.name)
    # transform headers to dict, lowercase and merge multiline headers, decode
    # quoted-printable bodies
    mbox_usable = []
    for msg in mbox:
        mbox_usable.append(MyMail())
        mbox_usable[-1].headers = normalize_headers(msg._headers)
        # Get the only part or the text/plain one if there are multiple
        payload = msg.get_payload()
        if isinstance(payload, type([])):
            for submsg in payload:
                subpart_headers = normalize_headers(submsg._headers)
                if subpart_headers['content-type'].startswith('text/plain'):
                    # add headers from multipart subpart
                    mbox_usable[-1].headers.update(subpart_headers)
                    mbox_usable[-1].body = maybe_decode(
                        mbox_usable[-1].headers['content-transfer-encoding'],
                        submsg.get_payload())
                    break
            else:
                print "warning: message doesn't have text/plain part"
        else:
            mbox_usable[-1].body = maybe_decode(
                mbox_usable[-1].headers['content-transfer-encoding'], payload)

    mbox_usable.sort(key=lambda x: parsedate(x.headers['date']))

    if debug:
        code.interact(local=locals())

    first = True
    for msg in mbox_usable:
        if first is True:
            print >> output, '>____________________________________________________________________<'
            print >> output, 'Date: ', msg.headers.get('date')
            print >> output, 'Subject: ', msg.headers.get('subject')
            print >> output
            print >> output, msg.body
            first = False
        else:
            print >> output, '>____________________________________________________________________<'
            print >> output, 'Date: ', msg.headers.get('date')
            print >> output, filter_stuff(msg.body)

    output.flush()
    tmpf.close()

    os.system("vim -c 'set ft=mail' -c 'set wrap' '%s'" % output.name)
Ejemplo n.º 39
0
            except Exception, ex:
                print 'Error reading file %s.' % outfile
                print 'Error: ', ex
                print 'Deleting and recaching file.'
                os.remove(outfile)

            warnings.resetwarnings()

        if not os.path.exists(outfile):

            if not self.quiet: print 'Caching ft1 file. Saving to %s' % outfile

            if isinstance(self.sa.pixeldata.ft1files, collections.Iterable):
                temp = NamedTemporaryFile(delete=True)
                temp.write('\n'.join(self.sa.pixeldata.ft1files))
                temp.seek(0)
                infile = '@%s' % temp.name
            else:
                infile = self.sa.pixeldata.ft1files

            import GtApp
            GtApp.GtApp("gtselect",
                        'dataSubselector').run(infile=infile,
                                               outfile=outfile,
                                               ra=self.roi_dir.ra(),
                                               dec=self.roi_dir.dec(),
                                               rad=self.sa.maxROI,
                                               tmin=0,
                                               tmax=0,
                                               emin=emin,
                                               emax=emax,
Ejemplo n.º 40
0
    def create(self, request):
        """
        Does not work in Swagger!

        Create a new Property from a building file.
        ---
        consumes:
            - multipart/form-data
        parameters:
            - name: organization_id
              type: integer
              required: true
            - name: cycle_id
              type: integer
              required: true
            - name: file_type
              type: string
              enum: ["Unknown", "BuildingSync", "HPXML"]
              required: true
            - name: file
              description: In-memory file object
              required: true
              type: file
        """
        if len(request.FILES) == 0:
            return JsonResponse({
                'success':
                False,
                'message':
                'Must pass file in as a Multipart/Form post'
            })

        the_file = request.data['file']
        file_type = BuildingFile.str_to_file_type(
            request.data.get('file_type', 'Unknown'))

        organization_id = request.data['organization_id']
        cycle = request.data.get('cycle_id', None)

        if not cycle:
            return JsonResponse({
                'success': False,
                'message': 'Cycle ID is not defined'
            })
        else:
            cycle = Cycle.objects.get(pk=cycle)

        # figure out if file is xml or zip
        the_filename = the_file._get_name()
        tmp_filename, file_extension = os.path.splitext(the_filename)
        # initialize
        p_status = True
        property_state = True
        messages = {'errors': [], 'warnings': []}

        if file_extension == '.zip':
            # ZIP FILE, extract and process files one by one
            # print("This file is a ZIP")

            with zipfile.ZipFile(the_file, "r", zipfile.ZIP_STORED) as openzip:
                filelist = openzip.infolist()
                for f in filelist:
                    # print("FILE: {}".format(f.filename))
                    # process xml files
                    if '.xml' in f.filename and '__MACOSX' not in f.filename:
                        # print("PROCESSING file: {}".format(f.filename))
                        data_file = NamedTemporaryFile()
                        data_file.write(openzip.read(f))
                        data_file.seek(0)
                        size = os.path.getsize(data_file.name)
                        content_type = 'text/xml'
                        # print("DATAFILE:")
                        # print(data_file)
                        a_file = InMemoryUploadedFile(data_file,
                                                      'data_file',
                                                      f.filename,
                                                      content_type,
                                                      size,
                                                      charset=None)

                        building_file = BuildingFile.objects.create(
                            file=a_file,
                            filename=f.filename,
                            file_type=file_type,
                        )
                        p_status_tmp, property_state_tmp, property_view, messages_tmp = building_file.process(
                            organization_id, cycle)
                        # print('messages_tmp: ')
                        # print(messages_tmp)

                        # append errors to overall messages
                        for i in messages_tmp['errors']:
                            messages['errors'].append(f.filename + ": " + i)
                        for i in messages_tmp['warnings']:
                            messages['warnings'].append(f.filename + ": " + i)

                        if not p_status_tmp:
                            # capture error
                            p_status = p_status_tmp
                        else:
                            # capture a real property_state (not None)
                            property_state = property_state_tmp

        else:
            # just an XML
            building_file = BuildingFile.objects.create(
                file=the_file,
                filename=the_file.name,
                file_type=file_type,
            )

            p_status, property_state, property_view, messages = building_file.process(
                organization_id, cycle)

        if p_status and property_state:
            if len(messages['warnings']) > 0:
                return JsonResponse({
                    'success': True,
                    'status': 'success',
                    'message': {
                        'warnings': messages['warnings']
                    },
                    'data': {
                        'property_view':
                        PropertyViewAsStateSerializer(property_view).data,
                        # 'property_state': PropertyStateWritableSerializer(property_state).data,
                    },
                })
            else:
                return JsonResponse({
                    'success': True,
                    'status': 'success',
                    'message': {
                        'warnings': []
                    },
                    'data': {
                        'property_view':
                        PropertyViewAsStateSerializer(property_view).data,
                        # 'property_state': PropertyStateWritableSerializer(property_state).data,
                    },
                })
        else:
            return JsonResponse(
                {
                    'success': False,
                    'status': 'error',
                    'message': messages
                },
                status=status.HTTP_400_BAD_REQUEST)
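The zip branch above boils down to one reusable pattern: copy each XML member into a NamedTemporaryFile and wrap it as an InMemoryUploadedFile so downstream code can treat it like a normal upload. A condensed sketch of that pattern, assuming Django is available (the function name is mine):

import os
import zipfile
from tempfile import NamedTemporaryFile
from django.core.files.uploadedfile import InMemoryUploadedFile

def xml_members_as_uploads(zip_source):
    """Yield each .xml member of a zip as an InMemoryUploadedFile."""
    with zipfile.ZipFile(zip_source, "r") as openzip:
        for info in openzip.infolist():
            if not info.filename.endswith(".xml") or "__MACOSX" in info.filename:
                continue
            data_file = NamedTemporaryFile()
            data_file.write(openzip.read(info))
            data_file.seek(0)
            size = os.path.getsize(data_file.name)
            yield InMemoryUploadedFile(data_file, "data_file", info.filename,
                                       "text/xml", size, charset=None)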
Ejemplo n.º 41
0
class TestMothurClassifySeqs(TestCase):
    def setUp(self):
        self.ref_file = NamedTemporaryFile()
        self.ref_file.write(mothur_ref_seqs)
        self.ref_file.seek(0)

        self.tax_file = NamedTemporaryFile()
        self.tax_file.write(mothur_taxonomy)
        self.tax_file.seek(0)

    def test_app(self):
        app = MothurClassifySeqs({
            'reference': self.ref_file.name,
            'taxonomy': self.tax_file.name,
        })
        res = app(mothur_seqs)
        assignments = res['assignments'].read()
        self.assertEqual(assignments, mothur_assignments)

        summary = res['summary'].read()
        self.assertEqual(summary, mothur_summary)

    def test_format_function_arguments(self):
        app = MothurClassifySeqs({
            'reference': '/home/myuser/ref-seqs.fasta',
            'taxonomy': '/home/MyUser/data/tax.txt',
            'cutoff': 80,
        })
        obs_args = app._format_function_arguments(
            ['reference', 'taxonomy', 'cutoff', 'iters'])
        exp_args = ("reference=/home/myuser/ref\\-seqs.fasta, "
                    "taxonomy=/home/MyUser/data/tax.txt, cutoff=80")
        self.assertEqual(obs_args, exp_args)

    def test_compile_mothur_script(self):
        app = MothurClassifySeqs({
            'reference': '/home/myuser/ref-seqs.fasta',
            'taxonomy': '/home/MyUser/data/tax.txt',
            'cutoff': 80,
        })
        app._input_filename = "/my/input.fasta"
        exp_script = ("#classify.seqs(fasta=/my/input.fasta, "
                      "reference=/home/myuser/ref\-seqs.fasta, "
                      "taxonomy=/home/MyUser/data/tax.txt, "
                      "cutoff=80)")
        self.assertEqual(app._compile_mothur_script(), exp_script)

    def test_mothur_classify_file(self):
        query_file = StringIO(mothur_seqs)
        res = mothur_classify_file(query_file, self.ref_file.name,
                                   self.tax_file.name)
        exp_res = {
            'A': ([
                'k__Bacteria', 'p__Firmicutes', 'c__Clostridia',
                'o__Clostridale', 'f__Eubacteriaceae', 'g__Eubacterium',
                's__Eubacteriumfoedans'
            ], 1.0),
            'Very': (['k__Bacteria', 'p__Bacteriodetes'], 1.0),
            '01': (['k__Bacteria', 'p__Firmicutes'], 1.0),
        }
        self.assertEqual(res, exp_res)

    def test_unclassifiable_sequence(self):
        query_file = StringIO(
            ">MostlyTs\nTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT"
            "TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTATTTTTTTTTTTTTTTTTTTTTTTTTTTTTT"
            "TTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT\n"
        )
        res = mothur_classify_file(query_file, self.ref_file.name,
                                   self.tax_file.name)
        exp_res = {
            'MostlyTs': (['Unknown'], 0.0),
        }
        self.assertEqual(res, exp_res)
Ejemplo n.º 42
0
    def export(self,
               out_f=None,
               format='mp3',
               codec=None,
               bitrate=None,
               parameters=None,
               tags=None,
               id3v2_version='4'):
        """
        Export an AudioSegment to a file with given options

        out_f (string):
            Path to destination audio file

        format (string)
            Format for destination audio file.
            ('mp3', 'wav', 'raw', 'ogg' or other ffmpeg/avconv supported files)

        codec (string)
            Codec used for encoding the destination file.

        bitrate (string)
            Bitrate used when encoding destination file. (64, 92, 128, 256, 312k...)
            Each codec accepts different bitrate arguments so take a look at the
            ffmpeg documentation for details (bitrate usually shown as -b, -ba or
            -a:b).

        parameters (string)
            Additional ffmpeg/avconv parameters

        tags (dict)
            Set metadata information to destination files
            usually used as tags. ({title='Song Title', artist='Song Artist'})

        id3v2_version (string)
            Set ID3v2 version for tags. (default: '4')
        """
        id3v2_allowed_versions = ['3', '4']

        out_f = _fd_or_path_or_tempfile(out_f, 'wb+')
        out_f.seek(0)

        if format == "raw":
            out_f.write(self._data)
            out_f.seek(0)
            return out_f

        # for wav output we can just write the data directly to out_f
        if format == "wav":
            data = out_f
        else:
            data = NamedTemporaryFile(mode="wb", delete=False)

        wave_data = wave.open(data, 'wb')
        wave_data.setnchannels(self.channels)
        wave_data.setsampwidth(self.sample_width)
        wave_data.setframerate(self.frame_rate)
        # For some reason packing the wave header struct with
        # a float in python 2 doesn't throw an exception
        wave_data.setnframes(int(self.frame_count()))
        wave_data.writeframesraw(self._data)
        wave_data.close()

        # for wav files, we're done (wav data is written directly to out_f)
        if format == 'wav':
            return out_f

        output = NamedTemporaryFile(mode="w+b", delete=False)

        # build converter command to export
        conversion_command = [
            self.converter,
            '-y',  # always overwrite existing files
            "-f",
            "wav",
            "-i",
            data.name,  # input options (filename last)
        ]

        if codec is None:
            codec = self.DEFAULT_CODECS.get(format, None)

        if codec is not None:
            # force audio encoder
            conversion_command.extend(["-acodec", codec])

        if bitrate is not None:
            conversion_command.extend(["-b:a", bitrate])

        if parameters is not None:
            # extend arguments with arbitrary set
            conversion_command.extend(parameters)

        if tags is not None:
            if not isinstance(tags, dict):
                raise InvalidTag("Tags must be a dictionary.")
            else:
                # Extend converter command with tags
                # print(tags)
                for key, value in tags.items():
                    conversion_command.extend(
                        ['-metadata', '{0}={1}'.format(key, value)])

                if format == 'mp3':
                    # set id3v2 tag version
                    if id3v2_version not in id3v2_allowed_versions:
                        raise InvalidID3TagVersion(
                            "id3v2_version not allowed, allowed versions: %s" %
                            id3v2_allowed_versions)
                    conversion_command.extend(
                        ["-id3v2_version", id3v2_version])

        conversion_command.extend([
            "-f",
            format,
            output.name,  # output options (filename last)
        ])

        log_conversion(conversion_command)

        # read stdin / write stdout
        p = subprocess.Popen(conversion_command,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        p_out, p_err = p.communicate()

        if p.returncode != 0:
            raise CouldntEncodeError(
                "Encoding failed. ffmpeg/avlib returned error code: {0}\n\nOutput from ffmpeg/avlib:\n\n{1}"
                .format(p.returncode, p_err))

        output.seek(0)
        out_f.write(output.read())

        data.close()
        output.close()

        os.unlink(data.name)
        os.unlink(output.name)

        out_f.seek(0)
        return out_f
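A typical call into export() as documented above might look like this (file names and tag values are illustrative):

from pydub import AudioSegment

song = AudioSegment.from_wav("input.wav")
song.export("output.mp3",
            format="mp3",
            bitrate="192k",
            tags={"title": "Song Title", "artist": "Song Artist"},
            id3v2_version="4")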
Ejemplo n.º 43
0
Archivo: o4.py Proyecto: cnxtech/o4
def o4_pyforce(debug, no_revision, args: list, quiet=False):
    """
    Encapsulates Pyforce, doing bookkeeping to ensure that all files
    that should be operated on are in fact dealt with by p4. Handles
    retries and strips out requests for files that are caseful mismatches
    on the current file system (macOS).
    """

    from tempfile import NamedTemporaryFile
    from collections import defaultdict

    class LogAndAbort(Exception):
        'Dumps debug information on errors.'

    o4_log('pyforce', no_revision=no_revision, quiet=quiet, *args)

    tmpf = NamedTemporaryFile(dir='.o4')
    fstats = []
    for line in sys.stdin.read().splitlines():
        if line.startswith('#o4pass'):
            print(line)
            continue
        f = fstat_split(line)
        if f and caseful_accurate(f[F_PATH]):
            fstats.append(f)
        elif f:
            print(
                f"*** WARNING: Pyforce is skipping {f[F_PATH]} because it is casefully",
                "mismatching a local file.",
                file=sys.stderr)
    retries = 3
    head = _depot_path().replace('/...', '')
    while fstats:
        if no_revision:
            p4paths = [Pyforce.escape(f[F_PATH]) for f in fstats]
        else:
            p4paths = [
                f"{Pyforce.escape(f[F_PATH])}#{f[F_REVISION]}" for f in fstats
            ]
        tmpf.seek(0)
        tmpf.truncate()
        not_yet = []
        pargs = []
        xargs = []
        # This is a really bad idea, files are output to stdout before the actual
        # sync happens, causing checksum tests to start too early:
        #        if len(p4paths) > 30 and 'sync' in args:
        #            xargs.append('--parallel=threads=5')
        if sum(len(s) for s in p4paths) > 30000:
            pargs.append('-x')
            pargs.append(tmpf.name)
            for f in p4paths:
                tmpf.write(f.encode('utf8'))
                tmpf.write(b'\n')
            tmpf.flush()
        else:
            xargs.extend(p4paths)
        try:
            # TODO: Verbose
            #print('# PYFORCE({}, {}{})'.format(','.join(repr(a) for a in args), ','.join(
            #    repr(a) for a in paths[:3]), ', ...' if len(paths) > 3 else ''))
            errs = []
            repeats = defaultdict(list)
            infos = []
            for res in Pyforce(*pargs, *args, *xargs):
                if debug:
                    err_print("*** DEBUG: Received", repr(res))
                # FIXME: Delete this if-statement:
                if res.get('code', '') == 'info':
                    infos.append(res)
                    if res.get('data', '').startswith('Diff chunks: '):
                        continue
                if res.get('code', '') == 'error':
                    errs.append(res)
                    continue
                if 'resolveFlag' in res:
                    # TODO: resolveFlag can be ...?
                    #         m: merge
                    #         c: copy from  (not conflict!)
                    # We skip this entry as it is the second returned from p4
                    # for one input file
                    continue
                res_str = res.get('depotFile') or res.get('fromFile')
                if not res_str and res.get('data'):
                    res_str = head + '/' + res['data']
                if not res_str:
                    errs.append(res)
                    continue
                res_str = Pyforce.unescape(res_str)
                for i, f in enumerate(fstats):
                    if f"{head}/{f[F_PATH]}" in res_str:
                        repeats[f"{head}/{f[F_PATH]}"].append(res)
                        not_yet.append(fstats.pop(i))
                        break
                else:
                    for f in repeats.keys():
                        if f in res_str:
                            if debug:
                                err_print(
                                    f"*** DEBUG: REPEAT: {res_str}\n {res}\n {repeats[f]}"
                                )
                            break
                    else:
                        if debug:
                            err_print("*** DEBUG: ERRS APPEND", res)
                        errs.append(res)
            if errs:
                raise LogAndAbort('Unexpected reply from p4')

            if len(p4paths) == len(fstats):
                raise LogAndAbort('Nothing recognized from p4')
        except P4Error as e:
            non_recoverable = False
            for a in e.args:
                if 'clobber writable file' in a['data']:
                    fname = a['data'].split('clobber writable file')[1].strip()
                    print("*** WARNING: Saving writable file as .bak:",
                          fname,
                          file=sys.stderr)
                    if os.path.exists(fname + '.bak'):
                        now = time.time()
                        print(
                            f"*** WARNING: Moved previous .bak to {fname}.{now}",
                            file=sys.stderr)
                        os.rename(fname + '.bak', f'{fname}.bak.{now}')
                    shutil.copy(fname, fname + '.bak')
                    os.chmod(fname, 0o400)
                else:
                    non_recoverable = True
            if non_recoverable:
                raise
        except P4TimeoutError as e:
            e = str(e).replace('\n', ' ')
            print(f"# P4 TIMEOUT, RETRIES {retries}: {e}", file=sys.stderr)
            retries -= 1
            if not retries:
                sys.exit(
                    f"{CLR}*** ERROR: Perforce timed out too many times:\n{e}")
        except LogAndAbort as e:
            import json
            fname = f'debug-pyforce.{os.getpid()}.{int(time.time())}'
            d = {
                'args': args,
                'fstats': fstats,
                'errs': errs,
                'repeats': repeats,
                'infos': infos,
            }
            json.dump(d, open(f'.o4/{fname}', 'wt'))
            sys.exit(f'{CLR}*** ERROR: {e}; detail in {fname}')
        finally:
            if not quiet:
                for fstat in not_yet:
                    # Printing the fstats after the p4 process has ended, because p4 marshals
                    # its objects before operation, as in "And for my next act... !"
                    # This premature printing leads to false checksum errors during sync.
                    print(fstat_join(fstat))
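The NamedTemporaryFile in this function exists so that very long path lists can be handed to p4 through its '-x argfile' global option instead of the command line. A simplified stand-alone sketch of that pattern using plain subprocess (not o4's Pyforce wrapper):

import subprocess
from tempfile import NamedTemporaryFile

def p4_sync_paths(paths):
    """Sync many depot paths by passing them through a -x argument file."""
    with NamedTemporaryFile(mode="w+", suffix=".args") as tmpf:
        tmpf.write("\n".join(paths) + "\n")
        tmpf.flush()
        # p4 reads one argument per line from the file named after -x
        subprocess.run(["p4", "-x", tmpf.name, "sync"], check=True)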
Ejemplo n.º 44
0
 def _writes(self, config: str, extension: str):
     f = NamedTemporaryFile(mode='r+', suffix='.' + extension)
     f.write(config)
     f.seek(0)
     return f
Ejemplo n.º 45
0
def get_file_from_s3(s3_bucket, path, conn_name=DEFAULT_CONN, decrypt=False):
    content = get_content_from_s3(s3_bucket, path, conn_name, decrypt)
    temp_file = NamedTemporaryFile()
    temp_file.write(content)
    temp_file.seek(0)
    return temp_file
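The returned NamedTemporaryFile is deleted as soon as it is closed or garbage collected, so callers usually consume it right away, typically by handing its .name to code that expects a path. A hedged usage sketch (the bucket, path, and CSV reading are illustrative; pandas is an assumption, not part of the snippet):

import pandas as pd

tmp = get_file_from_s3("my-bucket", "exports/data.csv")
df = pd.read_csv(tmp.name)   # .name is a real path while tmp is still open
tmp.close()                  # closing removes the temporary file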
Ejemplo n.º 46
0
 def temp_imagefile(width, height, format):
     i = CsvExportTest.image(width, height)
     f = NamedTemporaryFile(suffix='.jpg')
     i.save(f, format)
     f.seek(0)
     return f
Ejemplo n.º 47
0
class ParallelUclustConsensusTaxonomyAssignerTests(TestCase):
    def setUp(self):
        """
        """
        self.files_to_remove = []
        self.dirs_to_remove = []

        tmp_dir = get_qiime_temp_dir()
        self.test_out = get_tmp_filename(
            tmp_dir=tmp_dir,
            prefix='qiime_parallel_taxonomy_assigner_tests_',
            suffix='',
            result_constructor=str)
        self.dirs_to_remove.append(self.test_out)
        create_dir(self.test_out)

        self.tmp_seq_filepath = get_tmp_filename(
            tmp_dir=self.test_out,
            prefix='qiime_parallel_taxonomy_assigner_tests_input',
            suffix='.fasta')
        seq_file = open(self.tmp_seq_filepath, 'w')
        seq_file.write(uclust_test_seqs.toFasta())
        seq_file.close()
        self.files_to_remove.append(self.tmp_seq_filepath)

        self.id_to_taxonomy_file = NamedTemporaryFile(
            prefix='qiime_parallel_taxonomy_assigner_tests_id_to_taxonomy',
            suffix='.txt',
            dir=tmp_dir)
        self.id_to_taxonomy_file.write(uclust_id_to_taxonomy)
        self.id_to_taxonomy_file.seek(0)

        self.reference_seqs_file = NamedTemporaryFile(
            prefix='qiime_parallel_taxonomy_assigner_tests_ref_seqs',
            suffix='.fasta',
            dir=tmp_dir)
        self.reference_seqs_file.write(uclust_reference_seqs.toFasta())
        self.reference_seqs_file.seek(0)

        initiate_timeout(60)

    def tearDown(self):
        """ """
        disable_timeout()
        remove_files(self.files_to_remove)
        # remove directories last, so we don't get errors
        # trying to remove files which may be in the directories
        for d in self.dirs_to_remove:
            if exists(d):
                rmtree(d)

    def test_parallel_uclust_taxonomy_assigner(self):
        """ parallel_uclust_taxonomy_assigner functions as expected """
        params = {
            'id_to_taxonomy_fp': self.id_to_taxonomy_file.name,
            'reference_seqs_fp': self.reference_seqs_file.name,
            'uclust_min_consensus_fraction': 0.51,
            'uclust_similarity': 0.90,
            'uclust_max_accepts': 3
        }

        app = ParallelUclustConsensusTaxonomyAssigner()
        r = app(self.tmp_seq_filepath,
                self.test_out,
                params,
                job_prefix='UTATEST',
                poll_directly=True,
                suppress_submit_jobs=False)
        results = fields_to_dict(
            open(glob(join(self.test_out, '*_tax_assignments.txt'))[0], 'U'))
        # some basic sanity checks: we should get the same number of sequences
        # as our input with the same seq IDs. We should have a taxonomy string
        # and a confidence value for each seq as well.
        self.assertEqual(len(results), 6)
        self.assertEqual(len(results['s1']), 3)
        self.assertEqual(len(results['s6']), 3)
Ejemplo n.º 48
0
class Nitter(DockerBase):
    """Nitter Docker container object

    Args:
        host (IPv4Address): The host address the docker container will bind to.
        port (int): The port the docker container will listen to.

    Attributes:
        tempfile (TemporaryFile): A temporary file generated from a template.
        container (Container): Local representation of a container object.
            Holds the started instance of a docker container.
        address (str): The full address of the docker container.
        ports (dict[int, int]): Binds the listening port to the nitter docker container's
            internal port 8080.
        config_filepath (str): Path name to the generated tempfile.
        volumes (dict[str, dict[str, str]]): used to configure a bind volume.


    """

    host: IPv4Address
    port: int

    tempfile: TemporaryFile = None
    container: Optional[Container]

    class Config:
        arbitrary_types_allowed = True

    @property
    def address(self):
        return f"http://{self.host}:{self.port}"

    @property
    def config_filepath(self):
        if self.tempfile:
            return self.tempfile.name

    @property
    def ports(self):
        return {8080: self.port}

    @property
    def volumes(self):
        volumes = {"bind": "/src/nitter.conf", "mode": "ro"}
        return {self.config_filepath: volumes}

    def _render_config(self):
        env = Environment(loader=FileSystemLoader(TEMPLATES_DIRECTORY))
        template = env.get_template("nitter.conf")
        return template.render(self.dict())

    def _create_configfile(self):
        config = self._render_config()
        self.tempfile = NamedTemporaryFile(dir=PROJECT_ROOT)
        self.tempfile.write(config.encode())
        self.tempfile.seek(0)

    def get_profile(self, username: str, not_found_ok: bool = False):
        """Scrapes nitter for the target users profile information.

        This is a modified version of nitter_scraper.profile.get_profile().
        This version automatically uses the address of the docker container as the primary
        address to scrape profile data.

        Args:
            username: The target profile's username.
            not_found_ok: If not_found_ok is false (the default), a ValueError is raised if
                the target profile doesn't exist. If not_found_ok is true, None will be returned
                instead.

        Returns:
            Profile object if successfully scraped, otherwise None.

        Raises:
            ValueError: If the target profile does not exist and the not_found_ok argument is
                false.
        """
        return get_profile(username=username, not_found_ok=not_found_ok, address=self.address)

    def get_tweets(self, username: str, pages: int = 25, break_on_tweet_id: Optional[int] = None):
        """Gets the target users tweets

        This is a modified version of nitter_scraper.tweets.get_tweets().
        This version automatically uses the address of the docker container as the primary
        address to scrape profile data.

        Args:
            username: The target user's username.
            pages: Max number of pages to look back, starting from the latest tweet.
            break_on_tweet_id: Gives the ability to break out of the loop if a tweet's id is found.
            address: The address to scrape from. The default is https://nitter.net which should
                be used as a fallback address.

        Yields:
            Tweet Objects

        """

        return get_tweets(
            username=username,
            pages=pages,
            break_on_tweet_id=break_on_tweet_id,
            address=self.address,
        )

    def start(self):
        """Starts the docker the container"""
        self._create_configfile()
        client = self._get_client()

        self.container = client.containers.run(
            image="zedeus/nitter:2c6cabb4abe79166ce9973d8652fb213c1b0c5a2",
            auto_remove=True,
            ports=self.ports,
            detach=True,
            volumes=self.volumes,
        )
        time.sleep(1)
        logger.info(f"Running container {self.container.name} {self.container.short_id}.")

    def stop(self):
        """Stops the docker the container"""
        logger.info(f"Stopping container {self.container.name} {self.container.short_id}.")
        if self.container:
            self.container.stop(timeout=5)
            logger.info(f"Container {self.container.name} {self.container.short_id} Destroyed.")
Ejemplo n.º 49
0
# Imports and workbook setup assumed by this fragment
import random
from tempfile import NamedTemporaryFile
from openpyxl import Workbook

wb = Workbook()
ws = wb.active
ws.title = '워크시트1'
print(wb.sheetnames)

# Fill 10 rows x 10 columns with data
for x in range(1, 11):
    for y in range(1, 11):
        ws.cell(row=x, column=y, value=random.randint(0, 100))
print("~" * 80)

# Saving
# wb.save("ex09_1.xlsx")
# When using a web application such as Pyramid, Flask or Django,
# you can save the file to a stream by providing a NamedTemporaryFile()
tmp = None
try:
    tmp = NamedTemporaryFile()
    file_name = tmp.name
    print(file_name)
    wb.save(file_name)
    tmp.seek(0)
    stream = tmp.read()
    print(stream)
except PermissionError as e:
    print(e)
finally:
    tmp.close()
    print("Bye!!!")

Ejemplo n.º 50
0
class ParallelRdpTaxonomyAssignerTests(TestCase):
    def setUp(self):
        """ """
        self.files_to_remove = []
        self.dirs_to_remove = []

        tmp_dir = get_qiime_temp_dir()
        self.test_out = get_tmp_filename(
            tmp_dir=tmp_dir,
            prefix='qiime_parallel_taxonomy_assigner_tests_',
            suffix='',
            result_constructor=str)
        self.dirs_to_remove.append(self.test_out)
        create_dir(self.test_out)

        # Temporary input file
        self.tmp_seq_filepath = get_tmp_filename(
            tmp_dir=self.test_out,
            prefix='qiime_parallel_taxonomy_assigner_tests_input',
            suffix='.fasta')
        seq_file = open(self.tmp_seq_filepath, 'w')
        seq_file.write(rdp_test_seqs)
        seq_file.close()
        self.files_to_remove.append(self.tmp_seq_filepath)

        self.id_to_taxonomy_file = NamedTemporaryFile(
            prefix='qiime_parallel_taxonomy_assigner_tests_id_to_taxonomy',
            suffix='.txt',
            dir=tmp_dir)
        self.id_to_taxonomy_file.write(rdp_id_to_taxonomy)
        self.id_to_taxonomy_file.seek(0)

        self.reference_seqs_file = NamedTemporaryFile(
            prefix='qiime_parallel_taxonomy_assigner_tests_ref_seqs',
            suffix='.fasta',
            dir=tmp_dir)
        self.reference_seqs_file.write(rdp_reference_seqs)
        self.reference_seqs_file.seek(0)

        jar_fp = getenv('RDP_JAR_PATH')
        jar_basename = basename(jar_fp)
        if '2.2' not in jar_basename:
            raise ApplicationError(
                "RDP_JAR_PATH does not point to version 2.2 of the "
                "RDP Classifier.")

        initiate_timeout(60)

    def tearDown(self):
        """ """
        disable_timeout()
        remove_files(self.files_to_remove)
        # remove directories last, so we don't get errors
        # trying to remove files which may be in the directories
        for d in self.dirs_to_remove:
            if exists(d):
                rmtree(d)

    def test_parallel_rdp_taxonomy_assigner(self):
        """ parallel_rdp_taxonomy_assigner functions as expected """

        params = {
            'id_to_taxonomy_fp': self.id_to_taxonomy_file.name,
            'rdp_max_memory': 1500,
            'rdp_classifier_fp': getenv('RDP_JAR_PATH'),
            'confidence': 0.80,
            'reference_seqs_fp': self.reference_seqs_file.name
        }

        app = ParallelRdpTaxonomyAssigner()
        r = app(self.tmp_seq_filepath,
                self.test_out,
                params,
                job_prefix='RDPTEST',
                poll_directly=True,
                suppress_submit_jobs=False)
        results = fields_to_dict(
            open(glob(join(self.test_out, '*_tax_assignments.txt'))[0], 'U'))
        # some basic sanity checks: we should get the same number of sequences
        # as our input with the same seq IDs. We should have a taxonomy string
        # and a confidence value for each seq as well.
        self.assertEqual(len(results), 2)
        self.assertEqual(len(results['X67228']), 2)
        self.assertEqual(len(results['EF503697']), 2)
Ejemplo n.º 51
0
print("CWD: {}".format(Path.cwd()))

print("Creating temporary file.")
temp_file = NamedTemporaryFile(mode="w+", delete=False)  # type: TextIOWrapper
print("Temporary file at: {}".format(temp_file.name))

print("Loading all programs' tokens")
connection = sqlite3.connect(str(database_path))
cursor = connection.cursor()
rows = cursor.execute("SELECT tokens FROM tagger").fetchall()

print("Loading programs into temporary file")
for sentence in rows:
    sentence = json.loads(sentence[0])
    for token in sentence:  # type: str
        temp_file.write(" " + token)

temp_file.seek(0)
print("Make fast-text model.")
model = fastText.train_unsupervised(
    input=temp_file.name,
    #lr=0.1,
    epoch=500,
    minCount=4,
    model="skipgram",
    thread=18)

# Save model
model.save_model(str(Path(storage_folder, "model.bin")))
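Once trained from the token dump in the temporary file, the model can be queried directly for embeddings; a brief sketch using the same fastText object as above (the token value is illustrative):

vector = model.get_word_vector("def")   # embedding for one token
print(vector.shape)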
Ejemplo n.º 52
0
import numpy as np
import pandas as pd
from tempfile import NamedTemporaryFile
from os.path import getsize

np.random.seed(42)
a = np.random.randn(365, 4)

# Save to a temporary file and check its size
tmpf = NamedTemporaryFile()
np.savetxt(tmpf, a, delimiter=',')
print("Size CSV file", getsize(tmpf.name))

# Save in .npy format, load it back, and check the shape and size
tmpf = NamedTemporaryFile()
np.save(tmpf, a)
tmpf.seek(0)
loaded = np.load(tmpf)
print("Shape", loaded.shape)
print("Size .npy file", getsize(tmpf.name))

# Pickle the DataFrame and read it back
df = pd.DataFrame(a)
print(df)
df.to_pickle('tmpf.pkl')
print("Size pickled dataframe", getsize('tmpf.pkl'))
print("DF from pickle\n", pd.read_pickle('tmpf.pkl'))

## 3. Storing data with PyTables
# pip install numexpr tables - HDF5 library (structures data as groups and datasets)

import numpy as np
Ejemplo n.º 53
0
editor = os.environ['EDITOR']

# Open the environment
env = Environment('/var/trac/helpdesk')

# Grab the page model
page = WikiPage(env, name)

# Make a temporary file
safename = name.translate(string.maketrans('/', '_'))
file = NamedTemporaryFile(mode='w+', prefix=safename, suffix='.txt')

# If the page exists, populate the tempfile
if page.exists:
    file.write(page.text)
    file.flush()

# Open the file in $EDITOR
os.spawnlp(os.P_WAIT, editor, editor, file.name)

# Reread the text
file.seek(0)
page.text = file.read()

# Save the file back
try:
    page.save(author=os.getlogin(), comment='', remote_addr='127.0.0.1')
    print 'Page changed successfully'
except TracError, e:
    print 'Error: %s' % e.message
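The edit-in-$EDITOR round trip above is a generic temp-file pattern that is independent of Trac; a small Python 3 sketch of just that piece:

import os
import subprocess
from tempfile import NamedTemporaryFile

def edit_text(initial: str) -> str:
    """Let the user edit `initial` in $EDITOR and return the result."""
    editor = os.environ.get("EDITOR", "vi")
    with NamedTemporaryFile(mode="w+", suffix=".txt") as f:
        f.write(initial)
        f.flush()
        subprocess.call([editor, f.name])
        f.seek(0)
        return f.read()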
Ejemplo n.º 54
0
class Converter:
    def __init__(self, version="", shapes=False, clear_shape_errors=True):
        clear_directory("gtfs")
        if clear_shape_errors: clear_directory("shape-errors")

        # Stop info
        self.missing_stops = requests.get(
            "https://gist.githubusercontent.com/MKuranowski/0ca97a012d541899cb1f859cd0bab2e7/raw/missing_stops.json"
        ).json()
        self.rail_platforms = requests.get(
            "https://gist.githubusercontent.com/MKuranowski/0ca97a012d541899cb1f859cd0bab2e7/raw/rail_platforms.json"
        ).json()

        self.incorrect_stops = []
        self.unused_stops = list(self.missing_stops.keys())
        self.stops_map = {}

        self.stop_names = PROPER_STOP_NAMES.copy()

        # File handler
        self.version = None
        self.reader = None
        self.parser = None

        # Get shape generator instance
        if isinstance(shapes, Shaper):
            self.shapes = shapes
            self.shapes.open()

        elif shapes:
            self.shapes = Shaper()
            self.shapes.open()

        else:
            self.shapes = None

        self.get_file(version)

    def get_file(self, version):
        "Download and decompress schedules for current data. Returns tuple (TemporaryFile, version) - and that TemporaryFile is decompressed .TXT file"
        # Login to ZTM server and get the list of files
        server = FTP("rozklady.ztm.waw.pl")
        server.login()
        files = [f for f in server.nlst() if re.fullmatch(r"RA\d{6}\.7z", f)]

        # If user has requested an exact version, check if it's on the server
        if version:
            fname = "{}.7z".format(version)
            if fname not in files:
                raise KeyError(
                    "Requested file version ({}) not found on ZTM server".
                    format(version))

        # If not, find one valid today
        else:
            fdate = date.today()
            while True:
                fname = fdate.strftime("RA%y%m%d.7z")
                if fname in files: break
                else: fdate -= timedelta(days=1)

        # Create temporary files for storing the 7z archive and the decompressed TXT file
        temp_arch = NamedTemporaryFile(mode="w+b", delete=False)
        self.reader = NamedTemporaryFile(mode="w+t", delete=True)

        try:
            # Download the file
            server.retrbinary("RETR " + fname, temp_arch.write)
            server.quit()
            temp_arch.close()

            # Open the temporary archive inside
            with libarchive.public.file_reader(temp_arch.name) as arch:

                # Iterate over each file inside the archive
                for arch_file in arch:

                    # Assert the file inside the archive is the TXT file we're looking for
                    name_match = re.fullmatch(r"(RA\d{6})\.TXT",
                                              arch_file.pathname,
                                              flags=re.IGNORECASE)
                    if not name_match:
                        continue

                    # Save the feed version
                    self.version = name_match[1].upper()

                    # Decompress the TXT file block by block and save it to the reader
                    for block in arch_file.get_blocks():
                        self.reader.write(str(block, "cp1250"))
                    self.reader.seek(0)

                    # only one TXT file should be inside the archive
                    break

                else:
                    raise FileNotFoundError(
                        "no schedule file found inside archive {}".format(
                            fname))

        # Remove the temp arch file at the end
        finally:
            os.remove(temp_arch.name)

        self.parser = Parser(self.reader)

    def calendar(self):
        file = open("gtfs/calendar_dates.txt",
                    mode="w",
                    encoding="utf8",
                    newline="")
        writer = csv.writer(file)
        writer.writerow(["service_id", "date", "exception_type"])

        print("\033[1A\033[K" + "Parsing calendars (KA)")

        for day in self.parser.parse_ka():
            for service_id in day["services"]:
                writer.writerow([service_id, day["date"], "1"])

        file.close()

    def _stopgroup_railway(self, writer, group_id, group_name):
        # Load ZTM stakes from PR section
        # self.parser.parse_pr() has to be called to skip to the next entry in ZP
        stakes = list(self.parser.parse_pr())

        # If group is not in ACTIVE_RAIL_STATIONS, ignore it
        if group_id not in ACTIVE_RAIL_STATIONS:
            for s in stakes:
                self.stops_map[s["id"]] = None
            return

        # Basic info about the station
        station_info = self.rail_platforms.get(group_id, {})

        # If this station is not in rail_platforms, average all stake positions
        # In order to calculate an approx position of the station
        if not station_info:
            stake_positions = [(i["lat"], i["lon"]) for i in stakes]
            stake_positions = [i for i in stake_positions if i[0] and i[1]]

            if stake_positions:
                station_lat, station_lon = avg_position(stake_positions)

            # No position for the station
            else:
                for s in stakes:
                    self.stops_map[s["id"]] = None
                self.incorrect_stops.append(group_id)
                return

        # Otherwise get the position from rail_platforms data
        else:
            station_lat, station_lon = map(float,
                                           station_info["pos"].split(","))
            group_name = station_info["name"]

        # One Platform or No Platform data
        if (not station_info) or station_info["oneplatform"]:
            # Save position for shapes
            if self.shapes:
                self.shapes.stops[group_id] = station_lat, station_lon

            # Add info for stops_map
            for stake in stakes:
                self.stops_map[stake["id"]] = group_id

            # Output info to GTFS
            writer.writerow([
                group_id,
                group_name,
                station_lat,
                station_lon,
                "",
                "",
                station_info.get("ibnr_code", ""),
                "",
                station_info.get("wheelchair", 0),
            ])

        # Multi-Platform station
        else:
            # Hub entry
            writer.writerow([
                group_id,
                group_name,
                station_lat,
                station_lon,
                "1",
                "",
                station_info["ibnr_code"],
                "",
                station_info.get("wheelchair", 0),
            ])

            # Platforms
            for platform_id, platform_pos in station_info["platforms"].items():
                platform_lat, platform_lon = map(float,
                                                 platform_pos.split(","))
                platform_code = platform_id.split("p")[1]
                platform_name = f"{group_name} peron {platform_code}"

                # Save position for shapes
                if self.shapes:
                    self.shapes.stops[platform_id] = platform_lat, platform_lon

                # Output to GTFS
                writer.writerow([
                    platform_id,
                    platform_name,
                    platform_lat,
                    platform_lon,
                    "0",
                    group_id,
                    station_info["ibnr_code"],
                    platform_code,
                    station_info.get("wheelchair", 0),
                ])

            # Stops → Platforms
            for stake in stakes:
                # Defined stake in rail_platforms
                if stake["id"] in station_info["stops"]:
                    self.stops_map[stake["id"]] = station_info["stops"][
                        stake["id"]]

                # Unknown stake
                elif stake["id"] not in {"491303", "491304"}:
                    warn(
                        f'No platform defined for railway PR entry {group_name} {stake["id"]}'
                    )

    def _stopgroup_normal(self, writer, group_id, group_name):
        # Load ZTM stakes from PR section
        # self.parser.parse_pr() has to be called to skip to the next entry in ZP
        stakes = list(self.parser.parse_pr())

        # Split virtual stakes from normal stakes
        virtual_stakes = [i for i in stakes if i["code"][0] == "8"]
        normal_stakes = [i for i in stakes if i["code"][0] != "8"]

        # Load positions from missing_stops to normal_stakes
        for idx, stake in enumerate(normal_stakes):
            if (stake["lat"] == None or stake["lon"] == None) and \
                                          stake["id"] in self.missing_stops:

                self.unused_stops.remove(stake["id"])
                stake["lat"], stake["lon"] = self.missing_stops[stake["id"]]
                normal_stakes[idx] = stake

        position_stakes = [i for i in normal_stakes if i["lat"] and i["lon"]]

        # Convert normal stakes
        for stake in normal_stakes:

            # Position defined
            if stake["lat"] and stake["lon"]:

                # Save position for shapes
                if self.shapes:
                    self.shapes.stops[stake["id"]] = stake["lat"], stake["lon"]

                # Output info to GTFS
                writer.writerow([
                    stake["id"],
                    f'{group_name} {stake["code"]}',
                    stake["lat"],
                    stake["lon"],
                    "",
                    "",
                    "",
                    "",
                    stake["wheelchair"],
                ])

            # Position undefined
            else:
                self.stops_map[stake["id"]] = None
                self.incorrect_stops.append(stake["id"])

        # Convert virtual stops
        for stake in virtual_stakes:

            stakes_with_same_pos = [i["id"] for i in position_stakes if \
                           (i["lat"], i["lon"]) == (stake["lat"], stake["lon"])]

            stakes_with_same_code = [i["id"] for i in position_stakes if \
                                               i["code"][1] == stake["code"][1]]

            # Metro Młociny 88 → Metro Młociny 28
            if stake["id"] == "605988":
                counterpart_available = [i for i in position_stakes if \
                                                            i["id"] == "605928"]

                # If 605928 is present, map 605988 to it.
                # Otherwise fall back on default matching options
                if counterpart_available:
                    self.stops_map["605988"] = "605928"
                    continue

            # Map to a stake with same position
            if stakes_with_same_pos:
                self.stops_map[stake["id"]] = stakes_with_same_pos[0]

            # Map to a stake with same digit
            elif stakes_with_same_code:
                self.stops_map[stake["id"]] = stakes_with_same_code[0]

            # Unable to find a matching stake
            else:
                self.stops_map[stake["id"]] = None
                self.incorrect_stops.append(stake["id"])

    def stops(self):
        file = open("gtfs/stops.txt", mode="w", encoding="utf8", newline="")
        writer = csv.writer(file)
        writer.writerow([
            "stop_id", "stop_name", "stop_lat", "stop_lon", "location_type",
            "parent_station", "stop_IBNR", "platform_code",
            "wheelchair_boarding"
        ])

        print("\033[1A\033[K" + "Parsing stops (ZP)")

        for group in self.parser.parse_zp():
            # Fix town name for Kampinoski PN
            if group["town"] == "Kampinoski Pn":
                group["town"] = "Kampinoski PN"

            # Add name to self.stop_names if it's missing
            if group["id"] not in self.stop_names:
                group["name"] = normal_stop_name(group["name"])
                self.stop_names[group["id"]] = group["name"]

            else:
                group["name"] = self.stop_names[group["id"]]

            # Add town name to stop name
            if should_town_be_added_to_name(group):
                group["name"] = f'{group["town"]} {group["name"]}'
                self.stop_names[group["id"]] = group["name"]

            # Parse stakes
            if group["id"][1:3] in {"90", "91", "92"}:
                self._stopgroup_railway(writer, group["id"], group["name"])

            else:
                self._stopgroup_normal(writer, group["id"], group["name"])

        file.close()

    def routes_schedules(self):
        file_routes = open("gtfs/routes.txt",
                           mode="w",
                           encoding="utf8",
                           newline="")
        writer_routes = csv.writer(file_routes)
        writer_routes.writerow([
            "agency_id", "route_id", "route_short_name", "route_long_name",
            "route_type", "route_color", "route_text_color", "route_sort_order"
        ])

        file_trips = open("gtfs/trips.txt",
                          mode="w",
                          encoding="utf8",
                          newline="")
        writer_trips = csv.writer(file_trips)
        writer_trips.writerow([
            "route_id", "service_id", "trip_id", "trip_headsign",
            "direction_id", "shape_id", "exceptional", "wheelchair_accessible",
            "bikes_allowed"
        ])

        file_times = open("gtfs/stop_times.txt",
                          mode="w",
                          encoding="utf8",
                          newline="")
        writer_times = csv.writer(file_times)
        writer_times.writerow([
            "trip_id", "arrival_time", "departure_time", "stop_id",
            "stop_sequence", "pickup_type", "drop_off_type",
            "shape_dist_traveled"
        ])

        route_sort_order = 1  # Leave first 2 blank for M1 and M2 routes
        route_id = None

        print("\033[1A\033[K" + "Parsing routes & schedules (LL)")

        for route in self.parser.parse_ll():
            route_id, route_desc = route["id"], route["desc"]

            # Ignore Koleje Mazowieckie & Warszawska Kolej Dojazdowa routes
            if route_id.startswith("R") or route_id.startswith("WKD"):
                self.parser.skip_to_section("WK", end=True)
                continue

            print("\033[1A\033[K" +
                  f"Parsing routes & schedules (LL) - {route_id}")

            route_sort_order += 1
            route_type, route_color, route_text_color = route_color_type(
                route_id, route_desc)

            # Data loaded from TR section
            route_name = ""
            direction_stops = {"0": set(), "1": set()}
            on_demand_stops = set()
            inaccesible_trips = set()
            variant_directions = {}

            # Variants
            print("\033[1A\033[K" +
                  f"Parsing routes & schedules (TR) - {route_id}")

            for variant in self.parser.parse_tr():
                print("\033[1A\033[K" +
                      f"Parsing routes & schedules (LW) - {route_id}")

                stops = list(self.parser.parse_lw())

                # variant direction
                variant_directions[variant["id"]] = variant["direction"]

                # route_name should be the name of first and last stop of 1st variant
                if not route_name:
                    route_name = " — ".join([
                        self.stop_names[stops[0]["id"][:4]],
                        self.stop_names[stops[-1]["id"][:4]]
                    ])

                # add on_demand_stops from this variant
                on_demand_stops |= {i["id"] for i in stops if i["on_demand"]}

                # add stopids to proper direction in direction_stops
                direction_stops[variant["direction"]] |= {
                    i["id"]
                    for i in stops
                }

                # now parse ODWG sections - for inaccessible trips (only tram)
                if route_type == "0":
                    print("\033[1A\033[K" +
                          f"Parsing routes & schedules (TD) - {route_id}")

                    for trip in self.parser.parse_wgod(route_type, route_id):
                        if not trip["accessible"]:
                            inaccesible_trips.add(trip["id"])

                else:
                    self.parser.skip_to_section("RP", end=True)

            # Schedules
            print("\033[1A\033[K" +
                  f"Parsing routes & schedules (WK) - {route_id}")

            for trip in self.parser.parse_wk(route_id):

                # Change stop_ids based on stops_map
                for stopt in trip["stops"]:
                    stopt["orig_stop"] = stopt.pop("stop")
                    stopt["stop"] = self.stops_map.get(stopt["orig_stop"],
                                                       stopt["orig_stop"])

                # Filter out "None" stops
                trip["stops"] = [i for i in trip["stops"] if i["stop"]]

                # Ignore trips with only 1 stop
                if len(trip["stops"]) < 2: continue

                # Unpack info from trip_id
                trip_id = trip["id"]

                trip_id_split = trip_id.split("/")
                variant_id = trip_id_split[1]
                service_id = trip_id_split[2]

                del trip_id_split

                # "Exceptional" trip - a deutor/depot run
                if variant_id.startswith("TP-") or variant_id.startswith(
                        "TO-"):
                    exceptional = "0"
                else:
                    exceptional = "1"

                # Shapes
                if self.shapes:
                    shape_id, shape_distances = self.shapes.get(
                        route_type, trip_id,
                        [i["stop"] for i in trip["stops"]])

                else:
                    shape_id, shape_distances = "", {}

                # Wheelchair Accessibility
                if trip_id in inaccesible_trips:
                    wheelchair = "2"
                else:
                    wheelchair = "1"

                # Direction
                if variant_id in variant_directions:
                    direction = variant_directions[variant_id]
                else:
                    direction = trip_direction(
                        {i["orig_stop"]
                         for i in trip["stops"]}, direction_stops)

                    variant_directions[variant_id] = direction

                # Headsign
                headsign = proper_headsign(
                    trip["stops"][-1]["stop"],
                    self.stop_names.get(trip["stops"][-1]["stop"][:4], ""))

                if not headsign:
                    warn(f"No headsign for trip {trip_id}")

                # Write to trips.txt
                writer_trips.writerow([
                    route_id,
                    service_id,
                    trip_id,
                    headsign,
                    direction,
                    shape_id,
                    exceptional,
                    wheelchair,
                    "1",
                ])

                max_seq = len(trip["stops"]) - 1

                # StopTimes
                for seq, stopt in enumerate(trip["stops"]):
                    # Pickup Type
                    if seq == max_seq: pickup = "1"
                    elif "P" in stopt["flags"]: pickup = "1"
                    elif stopt["orig_stop"] in on_demand_stops: pickup = "3"
                    else: pickup = "0"

                    # Drop-Off Type
                    if seq == 0: dropoff = "1"
                    elif stopt["orig_stop"] in on_demand_stops: dropoff = "3"
                    else: dropoff = "0"

                    # Shape Distance
                    stop_dist = shape_distances.get(seq, "")
                    if stop_dist: stop_dist = round(stop_dist, 4)

                    # Output to stop_times.txt
                    writer_times.writerow([
                        trip_id, stopt["time"], stopt["time"], stopt["stop"],
                        seq, pickup, dropoff, stop_dist
                    ])

            # Output to routes.txt
            writer_routes.writerow([
                "0", route_id, route_id, route_name, route_type, route_color,
                route_text_color, route_sort_order
            ])

        file_routes.close()
        file_trips.close()
        file_times.close()

    def parse(self):
        self.calendar()
        self.stops()
        self.routes_schedules()

    def dump_missing_stops(self):
        with open("missing_stops.json", "w") as f:
            json.dump(
                {
                    "missing": [int(i) for i in self.incorrect_stops],
                    "unused": [int(i) for i in self.unused_stops],
                },
                f,
                indent=0)

    @staticmethod
    def static_files(shapes, version, download_time):
        feed_version = "Version {}; downloaded at: {}".format(
            version, download_time)

        "Create files that don't depend of ZTM file content"
        file = open("gtfs/agency.txt",
                    mode="w",
                    encoding="utf8",
                    newline="\r\n")
        file.write(
            'agency_id,agency_name,agency_url,agency_timezone,agency_lang,agency_phone,agency_fare_url\n'
        )
        file.write(
            '0,"Warszawski Transport Publiczny","https://wtp.waw.pl",Europe/Warsaw,pl,19 115,"https://www.nowa.wtp.waw.pl/ceny-i-rodzaje-biletow/"\n'
        )
        file.close()

        file = open("gtfs/feed_info.txt",
                    mode="w",
                    encoding="utf8",
                    newline="\r\n")
        file.write(
            'feed_publisher_name,feed_publisher_url,feed_lang,feed_version\n')
        if shapes:
            file.write(
                '"GTFS Convert: MKuranowski; Data: ZTM Warszawa; Bus Shapes (under ODbL License): © OpenStreetMap contributors","https://github.com/MKuranowski/WarsawGTFS",pl,{}\n'
                .format(feed_version))
        else:
            file.write(
                '"GTFS Convert: MKuranowski; Data: ZTM Warszawa","https://github.com/MKuranowski/WarsawGTFS",pl,{}\n'
                .format(feed_version))
        file.close()

    @staticmethod
    def compress(target="gtfs.zip"):
        "Compress all created files to gtfs.zip"
        with zipfile.ZipFile(target,
                             mode="w",
                             compression=zipfile.ZIP_DEFLATED) as archive:
            for file in os.listdir("gtfs"):
                if file.endswith(".txt"):
                    archive.write(os.path.join("gtfs", file), arcname=file)

    @classmethod
    def create(cls,
               version="",
               shapes=False,
               metro=False,
               prevver="",
               targetfile="gtfs.zip",
               clear_shape_errors=True):
        print("\033[1A\033[K" + "Downloading file")
        download_time = datetime.today().strftime("%Y-%m-%d %H:%M:%S")
        self = cls(version, shapes)

        if prevver == self.version:
            self.reader.close()
            print("\033[1A\033[K" +
                  "File matches the 'prevver' argument, aborting!")
            return

        print("\033[1A\033[K" + "Starting parser...")
        self.parse()

        print("\033[1A\033[K" + "Parser finished working, closing TXT file")
        self.reader.close()
        self.dump_missing_stops()

        print("\033[1A\033[K" + "Creating static files")
        self.static_files(bool(self.shapes), self.version, download_time)

        if metro:
            print("\033[1A\033[K" + "Adding metro")
            Metro.add()

        print("\033[1A\033[K" + "Compressing")
        self.compress(targetfile)

        return self.version
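
The missing_stops.json report written by dump_missing_stops above can be inspected with a few lines of standard-library code. This is only an illustrative sketch; what "missing" and "unused" actually mean depends on how incorrect_stops and unused_stops are populated elsewhere in the class.

# Hypothetical consumer of the report produced by dump_missing_stops().
import json

with open("missing_stops.json") as f:
    report = json.load(f)

print(len(report["missing"]), "stop IDs reported as missing")
print(len(report["unused"]), "stop IDs reported as unused")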
Example #55
    def post(self, request):
        file_content = None
        tmp_file = None
        try:
            file_content = request.raw_post_data
            file_errors, file_extension = _perform_file_validations(request)
            tmp_file = NamedTemporaryFile(delete=True, suffix=file_extension)
            if file_errors:
                logger.info(
                    "User: %s. Upload File validation failed: %s. File name: %s, size: %d",
                    request.user.username, json.dumps(file_errors),
                    request.GET.get("qqfile"),
                    int(request.META.get('CONTENT_LENGTH')))

                return HttpResponse(json.dumps({
                    'success': False,
                    'error_msg': file_errors
                }),
                                    content_type='application/json')

            tmp_file.write(file_content)
            tmp_file.seek(0)

            project_name = request.GET['pname'].strip()
            manager = get_database_manager(request.user)
            questionnaire_code = generate_questionnaire_code(manager)

            xls_parser_response = XlsFormParser(tmp_file, project_name,
                                                manager).parse()
            if xls_parser_response.errors:
                error_list = list(xls_parser_response.errors)
                logger.info("User: %s. Upload Errors: %s",
                            request.user.username, json.dumps(error_list))

                return HttpResponse(
                    content_type='application/json',
                    content=json.dumps({
                        'success':
                        False,
                        'error_msg':
                        error_list,
                        'message_prefix':
                        _("Sorry! Current version of DataWinners does not support"
                          ),
                        'message_suffix':
                        _("Update your XLSForm and upload again.")
                    }))
            tmp_file.seek(0)
            mangrove_service = MangroveService(
                request,
                questionnaire_code=questionnaire_code,
                project_name=project_name,
                xls_form=tmp_file,
                xls_parser_response=xls_parser_response)
            questionnaire_id, form_code = mangrove_service.create_project()

        except PyXFormError as e:
            logger.info("User: %s. Upload Error: %s", request.user.username,
                        e.message)

            message = transform_error_message(e.message)
            if 'name_type_error' in message or 'choice_name_type_error' in message:
                if 'choice_name_type_error' in message:
                    message_prefix = _(
                        "On your \"choices\" sheet the first and second column must be \"list_name\" and \"name\".  Possible errors:"
                    )
                else:
                    message_prefix = _(
                        "On your \"survey\" sheet the first and second column must be \"type\" and \"name\".  Possible errors:"
                    )
                return HttpResponse(
                    content_type='application/json',
                    content=json.dumps({
                        'success':
                        False,
                        'error_msg': [
                            _("Columns are missing"),
                            _("Column name is misspelled"),
                            _("Additional space in column name")
                        ],
                        'message_prefix':
                        message_prefix,
                        'message_suffix':
                        _("Update your XLSForm and upload again.")
                    }))
            else:
                return HttpResponse(
                    content_type='application/json',
                    content=json.dumps({
                        'success':
                        False,
                        'error_msg': [
                            message if message else ugettext(
                                "all XLSForm features. Please check the list of unsupported features."
                            )
                        ]
                    }))

        except QuestionAlreadyExistsException as e:
            logger.info("User: %s. Upload Error: %s", request.user.username,
                        e.message)

            return HttpResponse(
                content_type='application/json',
                content=json.dumps({
                    'success':
                    False,
                    'error_msg': [
                        _("Duplicate labels. All questions (labels) must be unique."
                          )
                    ],
                    'message_prefix':
                    _("Sorry! Current version of DataWinners does not support"
                      ),
                    'message_suffix':
                    _("Update your XLSForm and upload again.")
                }))

        except UnicodeDecodeError as e:
            logger.info("User: %s. Upload Error: %s", request.user.username,
                        e.message)

            return HttpResponse(
                content_type='application/json',
                content=json.dumps({
                    'success':
                    False,
                    'error_msg': [
                        _("Check your columns for errors.<br>There are missing symbols (like $ for relevant or calculate) or incorrect characters<br>"
                          ) + _("Update your XLSForm and upload again.")
                    ],
                }))

        except Exception as e:

            message = e.message if e.message else _("Errors in excel")

            logger.info("User: %s. Upload Exception message: %s",
                        request.user.username, e.message)

            odk_message = ''
            if 'ODK Validate Errors:' not in e.message:
                send_email_on_exception(
                    request.user,
                    "Questionnaire Create",
                    traceback.format_exc(),
                    additional_details={'file_contents': file_content})
            else:
                odk_message = translate_odk_message(e.message)
            message = odk_message if odk_message else message
            return HttpResponse(content_type='application/json',
                                content=json.dumps({
                                    'success': False,
                                    'error_msg': [message],
                                }))

        finally:

            if tmp_file:
                tmp_file.close()

        if not questionnaire_id:
            return HttpResponse(json.dumps({
                'success':
                False,
                'duplicate_project_name':
                True,
                'error_msg': [
                    _("Questionnaire with same name already exists.Upload was cancelled."
                      )
                ]
            }),
                                content_type='application/json')

        return HttpResponse(json.dumps({
            "success": True,
            "project_name": project_name,
            "project_id": questionnaire_id,
            "form_code": form_code
        }),
                            content_type='application/json')
Example #56
def main(parser: COP, options: 'Values', reg: str) -> None:
    workflow, flow_file = parse_reg(reg, src=True)

    if options.geditor:
        editor = glbl_cfg().get(['editors', 'gui'])
    else:
        editor = glbl_cfg().get(['editors', 'terminal'])

    # read in the flow.cylc file
    viewcfg = {
        'mark': options.mark,
        'single': options.single,
        'label': options.label,
        'empy': options.empy or options.process,
        'jinja2': options.jinja2 or options.process,
        'contin': options.cat or options.process,
        'inline': (options.inline or options.jinja2 or options.empy
                   or options.process),
    }
    lines = read_and_proc(
        flow_file,
        load_template_vars(options.templatevars, options.templatevars_file),
        viewcfg=viewcfg)

    if options.stdout:
        for line in lines:
            print(line)
        sys.exit(0)

    # write to a temporary file
    viewfile = NamedTemporaryFile(
        suffix=".flow.cylc", prefix=workflow.replace('/', '_') + '.',
    )
    for line in lines:
        viewfile.write((line + '\n').encode())
    viewfile.seek(0, 0)

    # set the file to be read only
    os.chmod(viewfile.name, 0o400)

    # capture the temp file's mod time in case the user edits it
    # and overrides the readonly mode.
    modtime1 = os.stat(viewfile.name).st_mtime

    # in case editor has options, e.g. 'emacs -nw':
    command_list = shlex.split(editor)
    command_list.append(viewfile.name)
    command = ' '.join(command_list)
    # THIS BLOCKS UNTIL THE COMMAND COMPLETES
    retcode = call(command_list)
    if retcode != 0:
        # the command returned a non-zero exit status
        raise CylcError(f'{command} failed: {retcode}')

    # !!!VIEWING FINISHED!!!

    # Did the user edit the file?
    modtime2 = os.stat(viewfile.name).st_mtime

    if modtime2 > modtime1:
        print(
            "\nWARNING: YOU HAVE EDITED A TEMPORARY READ-ONLY COPY "
            f"OF THE WORKFLOW:\n   {viewfile.name}\n",
            file=sys.stderr
        )
    # DONE
    viewfile.close()
Example #57
def temp_imagefile(width, height, format):
    i = image(width, height)
    f = NamedTemporaryFile(suffix=IMG_SUFFIX[format])
    i.save(f, format)
    f.seek(0)
    return f
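
A minimal usage sketch for the helper above (assuming a Pillow-style image() factory and an IMG_SUFFIX mapping such as {"PNG": ".png"}, as the snippet implies): the returned NamedTemporaryFile keeps the image on disk only until it is closed.

# Hypothetical caller: build a small PNG and inspect it before it disappears.
tmp = temp_imagefile(64, 48, "PNG")
try:
    print(tmp.name)      # path to the temporary image file
    data = tmp.read()    # raw image bytes; the helper already rewound the file
finally:
    tmp.close()          # NamedTemporaryFile deletes the file on close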
Example #58
        if len(item_rows):
            rows.append(order_row)
            rows.append(date_row)
            rows.append('')
            rows.append(item_header)
            for row in item_rows:
                rows.append(row)
            rows.append(total_row)
            rows.append('')
    rows.append('')

fp = NamedTemporaryFile(suffix=".csv")
writer = UnicodeWriter(fp)
writer.writerows(rows)
fp.seek(0)
output = fp.read()

# Send file to Mimic as attachment
body = 'Attached is the Trillium report for orders placed during the week ending %s.' % friday.strftime(
    "%B %d, %Y")
subject = '[Mimic OOS] Weekly Trillium report'
email = EmailMessage(
    subject,
    body,
    '*****@*****.**',
    [
        '*****@*****.**',
    ],
)
email.attach_file(fp.name)
Example #59
def create_invoice_excel(reference_invoice):
    package_path = os.path.dirname(__file__) + '/..'
    invoice_wb = openpyxl.open(f'{package_path}/templates/invoice_template.xlsx')
    invoice_dict = reference_invoice.to_dict()
    order_products = get_invoice_order_products(reference_invoice)
    total = reduce(lambda acc, op: acc + op['subtotal'], order_products, 0)
    ws = invoice_wb.worksheets[0]
    pl = invoice_wb.worksheets[1]

    # Set invoice header
    ws.cell(7, 2, reference_invoice.id)
    ws.cell(7, 5, reference_invoice.when_created)
    ws.cell(13, 4, reference_invoice.customer)
    ws.cell(15, 2, reference_invoice.payee)
    ws.cell(17, 4, reference_invoice.orders[0].address)
    ws.cell(21, 4, '') # city
    ws.cell(23, 5, reference_invoice.orders[0].country.name)
    ws.cell(25, 4, reference_invoice.orders[0].phone)

    # Set packing list header
    pl.cell(7, 2, reference_invoice.id)
    pl.cell(7, 5, reference_invoice.when_created)
    pl.cell(13, 4, reference_invoice.customer)
    pl.cell(15, 2, reference_invoice.payee)
    pl.cell(17, 4, reference_invoice.orders[0].address)
    pl.cell(21, 4, '') # city
    pl.cell(23, 5, reference_invoice.orders[0].country.name)
    pl.cell(25, 4, reference_invoice.orders[0].phone)

    # Set invoice footer
    ws.cell(305, 5, total)
    ws.cell(311, 4, f"{round(total, 2)} USD")
    ws.cell(312, 2, f"{invoice_dict['weight']}g")

    # Set packing list footer
    pl.cell(311, 4, f"{reduce(lambda qty, op: qty + op['quantity'], order_products, 0)}psc")
    pl.cell(312, 2, f"{invoice_dict['weight']}g")

    # Set order product lines
    row = 31
    last_row = 304

    for op in order_products:
        # Set invoice product item
        ws.cell(row, 1, op['id'])
        ws.cell(row, 2, op['name'])
        ws.cell(row, 3, op['quantity'])
        ws.cell(row, 4, op['price'])
        ws.cell(row, 5, op['subtotal'])

        # Set packing list product item
        pl.cell(row, 1, op['id'])
        pl.cell(row, 2, op['name'])
        pl.cell(row, 4, op['quantity'])

        row += 1
    ws.delete_rows(row, last_row - row + 1)
    pl.delete_rows(row, last_row - row + 1)
    file = NamedTemporaryFile()
    invoice_wb.save(file.name)
    file.seek(0)
    return file
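
A hedged usage sketch for create_invoice_excel: it returns an open NamedTemporaryFile rewound to the start of the saved workbook, so the caller can copy its bytes elsewhere before closing it (the destination filename below is purely illustrative).

# Hypothetical caller: persist the generated workbook to a regular file.
invoice_tmp = create_invoice_excel(reference_invoice)
try:
    with open("invoice_copy.xlsx", "wb") as out:
        out.write(invoice_tmp.read())
finally:
    invoice_tmp.close()  # the temporary file is removed once closed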
Example #60
    def post(self, request, project_id):
        manager = get_database_manager(request.user)
        questionnaire = Project.get(manager, project_id)
        file_content = None
        tmp_file = None
        try:
            file_content = request.raw_post_data

            file_errors, file_extension = _perform_file_validations(request)
            tmp_file = NamedTemporaryFile(delete=True, suffix=file_extension)

            if file_errors:
                logger.info(
                    "User: %s. Edit upload File validation failed: %s. File name: %s, size: %d",
                    request.user.username, json.dumps(file_errors),
                    request.GET.get("qqfile"),
                    int(request.META.get('CONTENT_LENGTH')))

                return HttpResponse(content_type='application/json',
                                    content=json.dumps({
                                        'success': False,
                                        'error_msg': file_errors
                                    }))

            tmp_file.write(file_content)
            tmp_file.seek(0)

            xls_parser_response = XlsFormParser(tmp_file, questionnaire.name,
                                                manager).parse()

            if xls_parser_response.errors:
                info_list = list(xls_parser_response.errors)
                logger.info("User: %s. Edit upload Errors: %s",
                            request.user.username, json.dumps(info_list))

                return HttpResponse(
                    content_type='application/json',
                    content=json.dumps({
                        'success':
                        False,
                        'error_msg':
                        info_list,
                        'message_prefix':
                        _("Sorry! Current version of DataWinners does not support"
                          ),
                        'message_suffix':
                        _("Update your XLSForm and upload again.")
                    }))

            mangrove_service = MangroveService(
                request,
                questionnaire_code=questionnaire.form_code,
                project_name=questionnaire.name,
                xls_parser_response=xls_parser_response)

            questionnaire.xform = mangrove_service.xform_with_form_code
            QuestionnaireBuilder(questionnaire,
                                 manager).update_questionnaire_with_questions(
                                     xls_parser_response.json_xform_data)

            tmp_file.seek(0)
            questionnaire.update_media_field_flag()
            questionnaire.save(process_post_update=False)

            base_name, extension = os.path.splitext(tmp_file.name)
            questionnaire.update_attachments(tmp_file,
                                             'questionnaire%s' % extension)
            self._purge_submissions(manager, questionnaire)
            self._purge_feed_documents(questionnaire, request)
            self._purge_media_details_documents(manager, questionnaire)
            self.recreate_submissions_mapping(manager, questionnaire)
            if xls_parser_response.info:
                info_list = list(xls_parser_response.info)
                logger.info("User: %s. Edit upload Errors: %s",
                            request.user.username, json.dumps(info_list))
                return HttpResponse(content_type='application/json',
                                    content=json.dumps({
                                        'success':
                                        True,
                                        'information':
                                        info_list,
                                    }))
        except PyXFormError as e:
            logger.info("User: %s. Upload Error: %s", request.user.username,
                        e.message)

            message = transform_error_message(e.message)
            if 'name_type_error' in message or 'choice_name_type_error' in message:
                if 'choice_name_type_error' in message:
                    message_prefix = _(
                        "On your \"choices\" sheet the first and second column must be \"list_name\" and \"name\".  Possible errors:"
                    )
                else:
                    message_prefix = _(
                        "On your \"survey\" sheet the first and second column must be \"type\" and \"name\".  Possible errors:"
                    )
                return HttpResponse(
                    content_type='application/json',
                    content=json.dumps({
                        'success':
                        False,
                        'error_msg': [
                            _("Columns are missing"),
                            _("Column name is misspelled"),
                            _("Additional space in column name")
                        ],
                        'message_prefix':
                        message_prefix,
                        'message_suffix':
                        _("Update your XLSForm and upload again.")
                    }))
            else:
                return HttpResponse(
                    content_type='application/json',
                    content=json.dumps({
                        'success':
                        False,
                        'error_msg': [
                            message if message else ugettext(
                                "all XLSForm features. Please check the list of unsupported features."
                            )
                        ]
                    }))

        except QuestionAlreadyExistsException as e:
            logger.info("User: %s. Upload Error: %s", request.user.username,
                        e.message)

            return HttpResponse(
                content_type='application/json',
                content=json.dumps({
                    'success':
                    False,
                    'error_msg': [
                        _("Duplicate labels. All questions (labels) must be unique."
                          )
                    ],
                    'message_prefix':
                    _("Sorry! Current version of DataWinners does not support"
                      ),
                    'message_suffix':
                    _("Update your XLSForm and upload again.")
                }))

        except UnicodeDecodeError as e:
            logger.info("User: %s. Upload Error: %s", request.user.username,
                        e.message)

            return HttpResponse(
                content_type='application/json',
                content=json.dumps({
                    'success':
                    False,
                    'error_msg': [
                        _("Check your columns for errors.<br>There are missing symbols (like $ for relevant or calculate) or incorrect characters<br>"
                          ) + _("Update your XLSForm and upload again.")
                    ],
                }))

        except Exception as e:

            logger.info("User: %s. Edit Upload Exception message: %s",
                        request.user.username, e.message)

            message = e.message if e.message else _("Some error in excel")
            odk_message = ''
            if 'ODK Validate Errors:' not in e.message:
                send_email_on_exception(
                    request.user,
                    "Questionnaire Edit",
                    traceback.format_exc(),
                    additional_details={'file_contents': file_content})
            else:
                odk_message = translate_odk_message(e.message)
            message = odk_message if odk_message else message
            return HttpResponse(content_type='application/json',
                                content=json.dumps({
                                    'error_msg': [message],
                                    'success': False,
                                }))

        finally:
            if tmp_file:
                tmp_file.close()

        return HttpResponse(
            json.dumps({
                "success":
                True,
                "project_name":
                questionnaire.name,
                "project_id":
                questionnaire.id,
                "file_name":
                "%s%s" % (slugify(questionnaire.name), extension),
                # "xls_dict": XlsProjectParser().parse(file_content)
            }),
            content_type='application/json')